language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
pypa__warehouse
|
tests/unit/manage/test_forms.py
|
{
"start": 36691,
"end": 37311
}
|
class ____:
def test_validate(self):
organization_service = pretend.stub()
user_service = pretend.stub(find_userid=pretend.call_recorder(lambda userid: 1))
form = forms.CreateOrganizationRoleForm(
MultiDict({"username": "user", "role_name": "Owner"}),
orgtype="Company",
organization_service=organization_service,
user_service=user_service,
)
assert form.organization_service is organization_service
assert form.user_service is user_service
assert form.validate(), str(form.errors)
|
TestCreateOrganizationRoleForm
|
python
|
run-llama__llama_index
|
llama-index-finetuning/llama_index/finetuning/types.py
|
{
"start": 1305,
"end": 1618
}
|
class ____(ABC):
"""Base Cohere Reranker Finetuning Engine."""
@abstractmethod
def finetune(self) -> None:
"""Goes off and does stuff."""
@abstractmethod
def get_finetuned_model(self, top_n: int = 5) -> CohereRerank:
"""Gets finetuned model."""
|
BaseCohereRerankerFinetuningEngine
|
python
|
ray-project__ray
|
python/ray/llm/_internal/serve/utils/batcher.py
|
{
"start": 301,
"end": 3662
}
|
class ____(Generic[T]):
"""This class batches multiple responses from a generator into a list of
single responses, at some time interval.
Args:
generator: the async generator that this class pulls responses
from.
interval_ms: the interval at which this class yields the current batch.
If None, this class will batch all responses from the generator
together and yield the entire batch once.
"""
def __init__(
self,
generator: AsyncGenerator[T, None],
interval_ms: Optional[float] = MODEL_RESPONSE_BATCH_TIMEOUT_MS,
):
self.generator = generator
self.queue: asyncio.Queue = asyncio.Queue()
if interval_ms is None:
self.interval_s = None
else:
self.interval_s = interval_ms / 1000
if interval_ms == 0:
return
self.done_event: asyncio.Event = asyncio.Event()
# We are okay with this task getting cancelled (to propagate cancellations)
self.read_task = asyncio.create_task(self.read())
def _merge_results(self, results: List[T]) -> Iterable[T]:
return results
async def stream(self) -> AsyncGenerator[Iterable[T], None]:
"""Drain from the queue every interval_ms and yield the merged results"""
if self.interval_s == 0:
async for item in self.generator:
yield [item]
return
try:
while True:
# Wait for the interval or until we finish, whichever is faster.
# We use an event to avoid asyncio.wait_for cancelling the real task on timeout.
try:
if self.interval_s is None:
await self.done_event.wait()
else:
await asyncio.wait_for(
self.done_event.wait(), timeout=self.interval_s
)
except asyncio.TimeoutError:
pass
# Get all elements from the queue
results, is_done = self.check_done_and_drain()
# If there are results, merge and yield them
if results:
output = self._merge_results(results)
yield output
# If the read task is done, exit the stream task
if is_done:
# Raise exception, if any
self.read_task.result()
break
finally:
# If the stream task is done, make sure to exit the read task
if not self.read_task.done():
self.read_task.cancel()
def check_done_and_drain(self):
results = self.drain_queue()
return results, self.read_task.done()
async def read(self):
"""Read from the generator and put into the queue in a tight loop"""
try:
async for x in self.generator:
self.queue.put_nowait(x)
finally:
self.done_event.set()
def drain_queue(self):
"""Drain all results currently in the queue"""
results = []
try:
while True:
results.append(self.queue.get_nowait())
except asyncio.QueueEmpty:
pass
return results
|
Batcher
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/strategy_options.py
|
{
"start": 63002,
"end": 71898
}
|
class ____(_LoadElement):
"""Loader strategies against specific relationship or column paths.
e.g.::
joinedload(User.addresses)
defer(Order.name)
selectinload(User.orders).lazyload(Order.items)
"""
__slots__ = ("_of_type", "_path_with_polymorphic_path")
__visit_name__ = "attribute_strategy_load_element"
_traverse_internals = _LoadElement._traverse_internals + [
("_of_type", visitors.ExtendedInternalTraversal.dp_multi),
(
"_path_with_polymorphic_path",
visitors.ExtendedInternalTraversal.dp_has_cache_key,
),
]
_of_type: Union[Mapper[Any], AliasedInsp[Any], None]
_path_with_polymorphic_path: Optional[PathRegistry]
is_class_strategy = False
is_token_strategy = False
def _init_path(
self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria
):
assert attr is not None
self._of_type = None
self._path_with_polymorphic_path = None
insp, _, prop = _parse_attr_argument(attr)
if insp.is_property:
# direct property can be sent from internal strategy logic
# that sets up specific loaders, such as
# emit_lazyload->_lazyload_reverse
# prop = found_property = attr
prop = attr
path = path[prop]
if path.has_entity:
path = path.entity_path
return path
elif not insp.is_attribute:
# should not reach here;
assert False
# here we assume we have user-passed InstrumentedAttribute
if not orm_util._entity_corresponds_to_use_path_impl(
path[-1], attr.parent
):
if raiseerr:
if attr_group and attr is not attr_group[0]:
raise sa_exc.ArgumentError(
"Can't apply wildcard ('*') or load_only() "
"loader option to multiple entities in the "
"same option. Use separate options per entity."
)
else:
_raise_for_does_not_link(path, str(attr), attr.parent)
else:
return None
# note the essential logic of this attribute was very different in
# 1.4, where there were caching failures in e.g.
# test_relationship_criteria.py::RelationshipCriteriaTest::
# test_selectinload_nested_criteria[True] if an existing
# "_extra_criteria" on a Load object were replaced with that coming
# from an attribute. This appears to have been an artifact of how
# _UnboundLoad / Load interacted together, which was opaque and
# poorly defined.
if extra_criteria:
assert not attr._extra_criteria
self._extra_criteria = extra_criteria
else:
self._extra_criteria = attr._extra_criteria
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = inspect(ac)
self._of_type = ext_info
self._path_with_polymorphic_path = path.entity_path[prop]
path = path[prop][ext_info]
else:
path = path[prop]
if path.has_entity:
path = path.entity_path
return path
def _generate_extra_criteria(self, context):
"""Apply the current bound parameters in a QueryContext to the
immediate "extra_criteria" stored with this Load object.
Load objects are typically pulled from the cached version of
the statement from a QueryContext. The statement currently being
executed will have new values (and keys) for bound parameters in the
extra criteria which need to be applied by loader strategies when
they handle this criteria for a result set.
"""
assert (
self._extra_criteria
), "this should only be called if _extra_criteria is present"
orig_query = context.compile_state.select_statement
current_query = context.query
# NOTE: while it seems like we should not do the "apply" operation
# here if orig_query is current_query, skipping it in the "optimized"
# case causes the query to be different from a cache key perspective,
# because we are creating a copy of the criteria which is no longer
# the same identity of the _extra_criteria in the loader option
# itself. cache key logic produces a different key for
# (A, copy_of_A) vs. (A, A), because in the latter case it shortens
# the second part of the key to just indicate on identity.
# if orig_query is current_query:
# not cached yet. just do the and_()
# return and_(*self._extra_criteria)
k1 = orig_query._generate_cache_key()
k2 = current_query._generate_cache_key()
return k2._apply_params_to_element(k1, and_(*self._extra_criteria))
def _set_of_type_info(self, context, current_path):
assert self._path_with_polymorphic_path
pwpi = self._of_type
assert pwpi
if not pwpi.is_aliased_class:
pwpi = inspect(
orm_util.AliasedInsp._with_polymorphic_factory(
pwpi.mapper.base_mapper,
(pwpi.mapper,),
aliased=True,
_use_mapper_path=True,
)
)
start_path = self._path_with_polymorphic_path
if current_path:
new_path = self._adjust_effective_path_for_current_path(
start_path, current_path
)
if new_path is None:
return
start_path = new_path
key = ("path_with_polymorphic", start_path.natural_path)
if key in context:
existing_aliased_insp = context[key]
this_aliased_insp = pwpi
new_aliased_insp = existing_aliased_insp._merge_with(
this_aliased_insp
)
context[key] = new_aliased_insp
else:
context[key] = pwpi
def _prepare_for_compile_state(
self,
parent_loader,
compile_state,
mapper_entities,
reconciled_lead_entity,
raiseerr,
):
# _AttributeStrategyLoad
current_path = compile_state.current_path
is_refresh = compile_state.compile_options._for_refresh_state
assert not self.path.is_token
if is_refresh and not self.propagate_to_loaders:
return []
if self._of_type:
# apply additional with_polymorphic alias that may have been
# generated. this has to happen even if this is a defaultload
self._set_of_type_info(compile_state.attributes, current_path)
# omit setting loader attributes for a "defaultload" type of option
if not self.strategy and not self.local_opts:
return []
if raiseerr and not reconciled_lead_entity:
self._raise_for_no_match(parent_loader, mapper_entities)
if self.path.has_entity:
effective_path = self.path.parent
else:
effective_path = self.path
if current_path:
assert effective_path is not None
effective_path = self._adjust_effective_path_for_current_path(
effective_path, current_path
)
if effective_path is None:
return []
return [("loader", cast(PathRegistry, effective_path).natural_path)]
def __getstate__(self):
d = super().__getstate__()
# can't pickle this. See
# test_pickled.py -> test_lazyload_extra_criteria_not_supported
# where we should be emitting a warning for the usual case where this
# would be non-None
d["_extra_criteria"] = ()
if self._path_with_polymorphic_path:
d["_path_with_polymorphic_path"] = (
self._path_with_polymorphic_path.serialize()
)
if self._of_type:
if self._of_type.is_aliased_class:
d["_of_type"] = None
elif self._of_type.is_mapper:
d["_of_type"] = self._of_type.class_
else:
assert False, "unexpected object for _of_type"
return d
def __setstate__(self, state):
super().__setstate__(state)
if state.get("_path_with_polymorphic_path", None):
self._path_with_polymorphic_path = PathRegistry.deserialize(
state["_path_with_polymorphic_path"]
)
else:
self._path_with_polymorphic_path = None
if state.get("_of_type", None):
self._of_type = inspect(state["_of_type"])
else:
self._of_type = None
|
_AttributeStrategyLoad
|
python
|
kennethreitz__tablib
|
src/tablib/formats/_rst.py
|
{
"start": 633,
"end": 9210
}
|
class ____:
title = 'rst'
extensions = ('rst',)
MAX_TABLE_WIDTH = 80 # Roughly. It may be wider to avoid breaking words.
@classmethod
def _get_column_string_lengths(cls, dataset):
"""
Returns a list of string lengths of each column, and a list of
maximum word lengths.
"""
if dataset.headers:
column_lengths = [[len(h)] for h in dataset.headers]
word_lens = [_max_word_len(h) for h in dataset.headers]
else:
column_lengths = [[] for _ in range(dataset.width)]
word_lens = [0 for _ in range(dataset.width)]
for row in dataset.dict:
values = iter(row.values() if hasattr(row, 'values') else row)
for i, val in enumerate(values):
text = to_str(val)
column_lengths[i].append(len(text))
word_lens[i] = max(word_lens[i], _max_word_len(text))
return column_lengths, word_lens
@classmethod
def _row_to_lines(cls, values, widths, wrapper, sep='|', justify=JUSTIFY_LEFT):
"""
Returns a table row of wrapped values as a list of lines
"""
if justify not in JUSTIFY_VALUES:
raise ValueError('Value of "justify" must be one of "{}"'.format(
'", "'.join(JUSTIFY_VALUES)
))
if justify == JUSTIFY_LEFT:
just = lambda text, width: text.ljust(width)
elif justify == JUSTIFY_CENTER:
just = lambda text, width: text.center(width)
else:
just = lambda text, width: text.rjust(width)
lpad = sep + ' ' if sep else ''
rpad = ' ' + sep if sep else ''
pad = ' ' + sep + ' '
cells = []
for value, width in zip(values, widths):
wrapper.width = width
text = to_str(value)
cell = wrapper.wrap(text)
cells.append(cell)
lines = zip_longest(*cells, fillvalue='')
lines = (
(just(cell_line, widths[i]) for i, cell_line in enumerate(line))
for line in lines
)
lines = [''.join((lpad, pad.join(line), rpad)) for line in lines]
return lines
@classmethod
def _get_column_widths(cls, dataset, max_table_width=MAX_TABLE_WIDTH, pad_len=3):
"""
Returns a list of column widths proportional to the median length
of the text in their cells.
"""
str_lens, word_lens = cls._get_column_string_lengths(dataset)
median_lens = [int(median(lens)) for lens in str_lens]
total = sum(median_lens)
if total > max_table_width - (pad_len * len(median_lens)):
column_widths = (max_table_width * l // total for l in median_lens)
else:
column_widths = (l for l in median_lens)
# Allow for separator and padding:
column_widths = (w - pad_len if w > pad_len else w for w in column_widths)
# Rather widen table than break words:
column_widths = [max(w, l) for w, l in zip(column_widths, word_lens)]
return column_widths
@classmethod
def export_set_as_simple_table(cls, dataset, column_widths=None):
"""
Returns reStructuredText grid table representation of dataset.
"""
lines = []
wrapper = TextWrapper()
if column_widths is None:
column_widths = cls._get_column_widths(dataset, pad_len=2)
border = ' '.join(['=' * w for w in column_widths])
lines.append(border)
if dataset.headers:
lines.extend(cls._row_to_lines(
dataset.headers,
column_widths,
wrapper,
sep='',
justify=JUSTIFY_CENTER,
))
lines.append(border)
for row in dataset.dict:
values = iter(row.values() if hasattr(row, 'values') else row)
lines.extend(cls._row_to_lines(values, column_widths, wrapper, ''))
lines.append(border)
return '\n'.join(lines)
@classmethod
def export_set_as_grid_table(cls, dataset, column_widths=None):
"""
Returns reStructuredText grid table representation of dataset.
>>> from tablib import Dataset
>>> from tablib.formats import registry
>>> bits = ((0, 0), (1, 0), (0, 1), (1, 1))
>>> data = Dataset()
>>> data.headers = ['A', 'B', 'A and B']
>>> for a, b in bits:
... data.append([bool(a), bool(b), bool(a * b)])
>>> rst = registry.get_format('rst')
>>> print(rst.export_set(data, force_grid=True))
+-------+-------+-------+
| A | B | A and |
| | | B |
+=======+=======+=======+
| False | False | False |
+-------+-------+-------+
| True | False | False |
+-------+-------+-------+
| False | True | False |
+-------+-------+-------+
| True | True | True |
+-------+-------+-------+
"""
lines = []
wrapper = TextWrapper()
if column_widths is None:
column_widths = cls._get_column_widths(dataset)
header_sep = '+=' + '=+='.join(['=' * w for w in column_widths]) + '=+'
row_sep = '+-' + '-+-'.join(['-' * w for w in column_widths]) + '-+'
lines.append(row_sep)
if dataset.headers:
lines.extend(cls._row_to_lines(
dataset.headers,
column_widths,
wrapper,
justify=JUSTIFY_CENTER,
))
lines.append(header_sep)
for row in dataset.dict:
values = iter(row.values() if hasattr(row, 'values') else row)
lines.extend(cls._row_to_lines(values, column_widths, wrapper))
lines.append(row_sep)
return '\n'.join(lines)
@classmethod
def _use_simple_table(cls, head0, col0, width0):
"""
Use a simple table if the text in the first column is never wrapped
>>> from tablib.formats import registry
>>> rst = registry.get_format('rst')
>>> rst._use_simple_table('menu', ['egg', 'bacon'], 10)
True
>>> rst._use_simple_table(None, ['lobster thermidor', 'spam'], 10)
False
"""
if head0 is not None:
head0 = to_str(head0)
if len(head0) > width0:
return False
for cell in col0:
cell = to_str(cell)
if len(cell) > width0:
return False
return True
@classmethod
def export_set(cls, dataset, **kwargs):
"""
Returns reStructuredText table representation of dataset.
Returns a simple table if the text in the first column is never
wrapped, otherwise returns a grid table.
>>> from tablib import Dataset
>>> bits = ((0, 0), (1, 0), (0, 1), (1, 1))
>>> data = Dataset()
>>> data.headers = ['A', 'B', 'A and B']
>>> for a, b in bits:
... data.append([bool(a), bool(b), bool(a * b)])
>>> table = data.rst
>>> table.split('\\n') == [
... '===== ===== =====',
... ' A B A and',
... ' B ',
... '===== ===== =====',
... 'False False False',
... 'True False False',
... 'False True False',
... 'True True True ',
... '===== ===== =====',
... ]
True
"""
if not dataset.dict:
return ''
force_grid = kwargs.get('force_grid', False)
max_table_width = kwargs.get('max_table_width', cls.MAX_TABLE_WIDTH)
column_widths = cls._get_column_widths(dataset, max_table_width)
use_simple_table = cls._use_simple_table(
dataset.headers[0] if dataset.headers else None,
dataset.get_col(0),
column_widths[0],
)
if use_simple_table and not force_grid:
return cls.export_set_as_simple_table(dataset, column_widths)
else:
return cls.export_set_as_grid_table(dataset, column_widths)
@classmethod
def export_book(cls, databook):
"""
reStructuredText representation of a Databook.
Tables are separated by a blank line. All tables use the grid
format.
"""
return '\n\n'.join(cls.export_set(dataset, force_grid=True)
for dataset in databook._datasets)
|
ReSTFormat
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDictClosed3.py
|
{
"start": 1851,
"end": 1952
}
|
class ____(TypedDict, extra_items=int | None):
name: str
# This should generate an error.
|
MovieBase
|
python
|
plotly__plotly.py
|
plotly/graph_objs/box/_selected.py
|
{
"start": 233,
"end": 2366
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "box"
_path_str = "box.selected"
_valid_props = {"marker"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.box.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.box.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.box.selected.Marker`
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.box.Selected`
marker
:class:`plotly.graph_objects.box.selected.Marker`
instance or dict with compatible properties
Returns
-------
Selected
"""
super().__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.box.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.Selected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Selected
|
python
|
coleifer__peewee
|
tests/model_sql.py
|
{
"start": 40130,
"end": 40210
}
|
class ____(Model):
class Meta:
database = compound_db
|
CompoundTestModel
|
python
|
automl__auto-sklearn
|
test/test_evaluation/test_test_evaluator.py
|
{
"start": 2978,
"end": 7609
}
|
class ____(unittest.TestCase):
def setUp(self):
self.queue = multiprocessing.Queue()
self.configuration = get_configuration_space(
DummyDatamanager()
).get_default_configuration()
self.data = get_multiclass_classification_datamanager()
self.tmp_dir = os.path.join(os.path.dirname(__file__), ".test_cv_functions")
self.backend = unittest.mock.Mock(spec=Backend)
self.backend.temporary_directory = tempfile.gettempdir()
self.backend.load_datamanager.return_value = self.data
self.dataset_name = json.dumps({"task_id": "test"})
self.port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except Exception:
pass
def test_eval_test(self):
eval_t(
queue=self.queue,
backend=self.backend,
config=self.configuration,
metrics=[accuracy],
seed=test.conftest.DEFAULT_SEED,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=False,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
port=self.port,
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
self.assertAlmostEqual(return_value[0]["loss"], 0.07999999999999996)
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", return_value[0]["additional_run_info"])
def test_eval_test_multi_objective(self):
metrics = {
accuracy: 0.07999999999999996,
balanced_accuracy: 0.05555555555555547,
}
eval_t(
queue=self.queue,
backend=self.backend,
config=self.configuration,
metrics=list(metrics.keys()),
seed=test.conftest.DEFAULT_SEED,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=False,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
port=self.port,
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
for metric, loss in metrics.items():
self.assertAlmostEqual(return_value[0]["loss"][metric.name], loss)
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", return_value[0]["additional_run_info"])
def test_eval_test_all_loss_functions(self):
eval_t(
queue=self.queue,
backend=self.backend,
config=self.configuration,
metrics=[accuracy],
seed=1,
num_run=1,
scoring_functions=SCORER_LIST,
output_y_hat_optimization=False,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
port=self.port,
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
# Note: All metric here should be minimized
fixture = {
"accuracy": 0.040000000000000036,
"balanced_accuracy": 0.02777777777777779,
"f1_macro": 0.0341005967604433,
"f1_micro": 0.040000000000000036,
"f1_weighted": 0.039693094629155934,
"log_loss": 0.13966929787769913,
"precision_macro": 0.03703703703703709,
"precision_micro": 0.040000000000000036,
"precision_weighted": 0.03555555555555556,
"recall_macro": 0.02777777777777779,
"recall_micro": 0.040000000000000036,
"recall_weighted": 0.040000000000000036,
"num_run": -1,
}
additional_run_info = return_value[0]["additional_run_info"]
for key, value in fixture.items():
self.assertAlmostEqual(additional_run_info[key], fixture[key], msg=key)
self.assertEqual(
len(additional_run_info),
len(fixture) + 1,
msg=sorted(additional_run_info.items()),
)
self.assertIn("duration", additional_run_info)
self.assertAlmostEqual(return_value[0]["loss"], 0.040000000000000036)
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
|
FunctionsTest
|
python
|
wandb__wandb
|
wandb/apis/public/runs.py
|
{
"start": 16474,
"end": 52618
}
|
class ____(Attrs):
"""A single run associated with an entity and project.
Args:
client: The W&B API client.
entity: The entity associated with the run.
project: The project associated with the run.
run_id: The unique identifier for the run.
attrs: The attributes of the run.
include_sweeps: Whether to include sweeps in the run.
Attributes:
tags ([str]): a list of tags associated with the run
url (str): the url of this run
id (str): unique identifier for the run (defaults to eight characters)
name (str): the name of the run
state (str): one of: running, finished, crashed, killed, preempting, preempted
config (dict): a dict of hyperparameters associated with the run
created_at (str): ISO timestamp when the run was started
system_metrics (dict): the latest system metrics recorded for the run
summary (dict): A mutable dict-like property that holds the current summary.
Calling update will persist any changes.
project (str): the project associated with the run
entity (str): the name of the entity associated with the run
project_internal_id (int): the internal id of the project
user (str): the name of the user who created the run
path (str): Unique identifier [entity]/[project]/[run_id]
notes (str): Notes about the run
read_only (boolean): Whether the run is editable
history_keys (str): Keys of the history metrics that have been
logged with `wandb.log({"key": "value"})`
metadata (str): Metadata about the run from wandb-metadata.json
"""
def __init__(
self,
client: RetryingClient,
entity: str,
project: str,
run_id: str,
attrs: Mapping | None = None,
include_sweeps: bool = True,
lazy: bool = True,
api: public.Api | None = None,
):
"""Initialize a Run object.
Run is always initialized by calling api.runs() where api is an instance of
wandb.Api.
"""
_attrs = attrs or {}
super().__init__(dict(_attrs))
self.client = client
self._entity = entity
self.project = project
self._files = {}
self._base_dir = env.get_dir(tempfile.gettempdir())
self.id = run_id
self.sweep = None
self._include_sweeps = include_sweeps
self._lazy = lazy
self._full_data_loaded = False # Track if we've loaded full data
self.dir = os.path.join(self._base_dir, *self.path)
try:
os.makedirs(self.dir)
except OSError:
pass
self._summary = None
self._metadata: dict[str, Any] | None = None
self._state = _attrs.get("state", "not found")
self.server_provides_internal_id_field: bool | None = None
self._server_provides_project_id_field: bool | None = None
self._is_loaded: bool = False
self._api: public.Api | None = api
self.load(force=not _attrs)
@property
def state(self):
"""The state of the run. Can be one of: Finished, Failed, Crashed, or Running."""
return self._state
@property
def entity(self):
"""The entity associated with the run."""
return self._entity
@property
def username(self):
"""This API is deprecated. Use `entity` instead."""
wandb.termwarn("Run.username is deprecated. Please use Run.entity instead.")
return self._entity
@property
def storage_id(self):
"""The unique storage identifier for the run."""
# For compatibility with wandb.Run, which has storage IDs
# in self.storage_id and names in self.id.
return self._attrs.get("id")
@property
def id(self):
"""The unique identifier for the run."""
return self._attrs.get("name")
@id.setter
def id(self, new_id):
"""Set the unique identifier for the run."""
attrs = self._attrs
attrs["name"] = new_id
return new_id
@property
def name(self):
"""The name of the run."""
return self._attrs.get("displayName")
@name.setter
def name(self, new_name):
"""Set the name of the run."""
self._attrs["displayName"] = new_name
return new_name
@classmethod
def create(
cls,
api: public.Api,
run_id: str | None = None,
project: str | None = None,
entity: str | None = None,
state: Literal["running", "pending"] = "running",
):
"""Create a run for the given project."""
api._sentry.message("Invoking Run.create", level="info")
run_id = run_id or runid.generate_id()
project = project or api.settings.get("project") or "uncategorized"
mutation = gql(
"""
mutation UpsertBucket($project: String, $entity: String, $name: String!, $state: String) {
upsertBucket(input: {modelName: $project, entityName: $entity, name: $name, state: $state}) {
bucket {
project {
name
entity { name }
}
id
name
}
inserted
}
}
"""
)
variables = {
"entity": entity,
"project": project,
"name": run_id,
"state": state,
}
res = api.client.execute(mutation, variable_values=variables)
res = res["upsertBucket"]["bucket"]
return Run(
api.client,
res["project"]["entity"]["name"],
res["project"]["name"],
res["name"],
{
"id": res["id"],
"config": "{}",
"systemMetrics": "{}",
"summaryMetrics": "{}",
"tags": [],
"description": None,
"notes": None,
"state": state,
},
lazy=False, # Created runs should have full data available immediately
)
def _load_with_fragment(
self, fragment: str, fragment_name: str, force: bool = False
):
"""Load run data using specified GraphQL fragment."""
# Cache the server capability check to avoid repeated network calls
if self._server_provides_project_id_field is None:
self._server_provides_project_id_field = (
_server_provides_project_id_for_run(self.client)
)
query = gql(
f"""
query Run($project: String!, $entity: String!, $name: String!) {{
project(name: $project, entityName: $entity) {{
run(name: $name) {{
{"projectId" if self._server_provides_project_id_field else ""}
...{fragment_name}
}}
}}
}}
{fragment}
"""
)
if force or not self._attrs:
response = self._exec(query)
if (
response is None
or response.get("project") is None
or response["project"].get("run") is None
):
raise ValueError("Could not find run {}".format(self))
self._attrs = response["project"]["run"]
self._state = self._attrs["state"]
if self._attrs.get("user"):
self.user = public.User(self.client, self._attrs["user"])
if self._include_sweeps and self.sweep_name and not self.sweep:
# There may be a lot of runs. Don't bother pulling them all
# just for the sake of this one.
self.sweep = public.Sweep.get(
self.client,
self._api,
self.entity,
self.project,
self.sweep_name,
withRuns=False,
)
if not self._is_loaded or force:
# Always set _project_internal_id if projectId is available, regardless of fragment type
if "projectId" in self._attrs:
self._project_internal_id = int(self._attrs["projectId"])
else:
self._project_internal_id = None
# Always call _load_from_attrs when using the full fragment or when the fields are actually present
if fragment_name == RUN_FRAGMENT_NAME or (
"config" in self._attrs
or "summaryMetrics" in self._attrs
or "systemMetrics" in self._attrs
):
self._load_from_attrs()
# Only mark as loaded for lightweight fragments, not full fragments
if fragment_name == LIGHTWEIGHT_RUN_FRAGMENT_NAME:
self._is_loaded = True
return self._attrs
def _load_from_attrs(self):
self._state = self._attrs.get("state", None)
# Only convert fields if they exist in _attrs
if "config" in self._attrs:
self._attrs["config"] = _convert_to_dict(self._attrs.get("config"))
if "summaryMetrics" in self._attrs:
self._attrs["summaryMetrics"] = _convert_to_dict(
self._attrs.get("summaryMetrics")
)
if "systemMetrics" in self._attrs:
self._attrs["systemMetrics"] = _convert_to_dict(
self._attrs.get("systemMetrics")
)
# Only check for sweeps if sweep_name is available (not in lazy mode or if it exists)
if self._include_sweeps and self._attrs.get("sweepName") and not self.sweep:
# There may be a lot of runs. Don't bother pulling them all
self.sweep = public.Sweep(
self.client,
self.entity,
self.project,
self._attrs["sweepName"],
withRuns=False,
)
config_user, config_raw = {}, {}
if self._attrs.get("config"):
try:
# config is already converted to dict by _convert_to_dict
for key, value in self._attrs.get("config", {}).items():
config = config_raw if key in WANDB_INTERNAL_KEYS else config_user
if isinstance(value, dict) and "value" in value:
config[key] = value["value"]
else:
config[key] = value
except (TypeError, AttributeError):
# Handle case where config is malformed or not a dict
pass
config_raw.update(config_user)
self._attrs["config"] = config_user
self._attrs["rawconfig"] = config_raw
return self._attrs
def load(self, force=False):
"""Load run data using appropriate fragment based on lazy mode."""
if self._lazy:
return self._load_with_fragment(
LIGHTWEIGHT_RUN_FRAGMENT, LIGHTWEIGHT_RUN_FRAGMENT_NAME, force
)
else:
return self._load_with_fragment(RUN_FRAGMENT, RUN_FRAGMENT_NAME, force)
@normalize_exceptions
def wait_until_finished(self):
"""Check the state of the run until it is finished."""
query = gql(
"""
query RunState($project: String!, $entity: String!, $name: String!) {
project(name: $project, entityName: $entity) {
run(name: $name) {
state
}
}
}
"""
)
while True:
res = self._exec(query)
state = res["project"]["run"]["state"]
if state in ["finished", "crashed", "failed"]:
self._attrs["state"] = state
self._state = state
return
time.sleep(5)
@normalize_exceptions
def update(self):
"""Persist changes to the run object to the wandb backend."""
mutation = gql(
"""
mutation UpsertBucket($id: String!, $description: String, $display_name: String, $notes: String, $tags: [String!], $config: JSONString!, $groupName: String, $jobType: String) {{
upsertBucket(input: {{id: $id, description: $description, displayName: $display_name, notes: $notes, tags: $tags, config: $config, groupName: $groupName, jobType: $jobType}}) {{
bucket {{
...RunFragment
}}
}}
}}
{}
""".format(RUN_FRAGMENT)
)
_ = self._exec(
mutation,
id=self.storage_id,
tags=self.tags,
description=self.description,
notes=self.notes,
display_name=self.display_name,
config=self.json_config,
groupName=self.group,
jobType=self.job_type,
)
self.summary.update()
@normalize_exceptions
def delete(self, delete_artifacts=False):
"""Delete the given run from the wandb backend.
Args:
delete_artifacts (bool, optional): Whether to delete the artifacts
associated with the run.
"""
mutation = gql(
"""
mutation DeleteRun(
$id: ID!,
{}
) {{
deleteRun(input: {{
id: $id,
{}
}}) {{
clientMutationId
}}
}}
""".format(
"$deleteArtifacts: Boolean" if delete_artifacts else "",
"deleteArtifacts: $deleteArtifacts" if delete_artifacts else "",
)
)
self.client.execute(
mutation,
variable_values={
"id": self.storage_id,
"deleteArtifacts": delete_artifacts,
},
)
def save(self):
"""Persist changes to the run object to the W&B backend."""
self.update()
@property
def json_config(self):
"""Return the run config as a JSON string.
<!-- lazydoc-ignore: internal -->
"""
config = {}
if "_wandb" in self.rawconfig:
config["_wandb"] = {"value": self.rawconfig["_wandb"], "desc": None}
for k, v in self.config.items():
config[k] = {"value": v, "desc": None}
return json.dumps(config)
def _exec(self, query, **kwargs):
"""Execute a query against the cloud backend."""
variables = {"entity": self.entity, "project": self.project, "name": self.id}
variables.update(kwargs)
return self.client.execute(query, variable_values=variables)
def _sampled_history(self, keys, x_axis="_step", samples=500):
spec = {"keys": [x_axis] + keys, "samples": samples}
query = gql(
"""
query RunSampledHistory($project: String!, $entity: String!, $name: String!, $specs: [JSONString!]!) {
project(name: $project, entityName: $entity) {
run(name: $name) { sampledHistory(specs: $specs) }
}
}
"""
)
response = self._exec(query, specs=[json.dumps(spec)])
# sampledHistory returns one list per spec, we only send one spec
return response["project"]["run"]["sampledHistory"][0]
def _full_history(self, samples=500, stream="default"):
node = "history" if stream == "default" else "events"
query = gql(
"""
query RunFullHistory($project: String!, $entity: String!, $name: String!, $samples: Int) {{
project(name: $project, entityName: $entity) {{
run(name: $name) {{ {}(samples: $samples) }}
}}
}}
""".format(node)
)
response = self._exec(query, samples=samples)
return [json.loads(line) for line in response["project"]["run"][node]]
@normalize_exceptions
def files(
self,
names: list[str] | None = None,
pattern: str | None = None,
per_page: int = 50,
):
"""Returns a `Files` object for all files in the run which match the given criteria.
You can specify a list of exact file names to match, or a pattern to match against.
If both are provided, the pattern will be ignored.
Args:
names (list): names of the requested files, if empty returns all files
pattern (str, optional): Pattern to match when returning files from W&B.
This pattern uses mySQL's LIKE syntax,
so matching all files that end with .json would be "%.json".
If both names and pattern are provided, a ValueError will be raised.
per_page (int): number of results per page.
Returns:
A `Files` object, which is an iterator over `File` objects.
"""
return public.Files(
self.client,
self,
names or [],
pattern=pattern,
per_page=per_page,
)
@normalize_exceptions
def file(self, name):
"""Return the path of a file with a given name in the artifact.
Args:
name (str): name of requested file.
Returns:
A `File` matching the name argument.
"""
return public.Files(self.client, self, [name])[0]
@normalize_exceptions
def upload_file(self, path, root="."):
"""Upload a local file to W&B, associating it with this run.
Args:
path (str): Path to the file to upload. Can be absolute or relative.
root (str): The root path to save the file relative to. For example,
if you want to have the file saved in the run as "my_dir/file.txt"
and you're currently in "my_dir" you would set root to "../".
Defaults to current directory (".").
Returns:
A `File` object representing the uploaded file.
"""
api = InternalApi(
default_settings={"entity": self.entity, "project": self.project},
retry_timedelta=RETRY_TIMEDELTA,
)
api.set_current_run_id(self.id)
root = os.path.abspath(root)
name = os.path.relpath(path, root)
upload_path = util.make_file_path_upload_safe(name)
with open(os.path.join(root, name), "rb") as f:
api.push({LogicalPath(upload_path): f})
return public.Files(self.client, self, [name])[0]
@normalize_exceptions
def history(
self, samples=500, keys=None, x_axis="_step", pandas=True, stream="default"
):
"""Return sampled history metrics for a run.
This is simpler and faster if you are ok with the history records being sampled.
Args:
samples : (int, optional) The number of samples to return
pandas : (bool, optional) Return a pandas dataframe
keys : (list, optional) Only return metrics for specific keys
x_axis : (str, optional) Use this metric as the xAxis defaults to _step
stream : (str, optional) "default" for metrics, "system" for machine metrics
Returns:
pandas.DataFrame: If pandas=True returns a `pandas.DataFrame` of history
metrics.
list of dicts: If pandas=False returns a list of dicts of history metrics.
"""
if keys is not None and not isinstance(keys, list):
wandb.termerror("keys must be specified in a list")
return []
if keys is not None and len(keys) > 0 and not isinstance(keys[0], str):
wandb.termerror("keys argument must be a list of strings")
return []
if keys and stream != "default":
wandb.termerror("stream must be default when specifying keys")
return []
elif keys:
lines = self._sampled_history(keys=keys, x_axis=x_axis, samples=samples)
else:
lines = self._full_history(samples=samples, stream=stream)
if pandas:
pd = util.get_module("pandas")
if pd:
lines = pd.DataFrame.from_records(lines)
else:
wandb.termwarn("Unable to load pandas, call history with pandas=False")
return lines
@normalize_exceptions
def scan_history(self, keys=None, page_size=1000, min_step=None, max_step=None):
"""Returns an iterable collection of all history records for a run.
Args:
keys ([str], optional): only fetch these keys, and only fetch rows that have all of keys defined.
page_size (int, optional): size of pages to fetch from the api.
min_step (int, optional): the minimum number of pages to scan at a time.
max_step (int, optional): the maximum number of pages to scan at a time.
Returns:
An iterable collection over history records (dict).
Example:
Export all the loss values for an example run
```python
run = api.run("entity/project-name/run-id")
history = run.scan_history(keys=["Loss"])
losses = [row["Loss"] for row in history]
```
"""
if keys is not None and not isinstance(keys, list):
wandb.termerror("keys must be specified in a list")
return []
if keys is not None and len(keys) > 0 and not isinstance(keys[0], str):
wandb.termerror("keys argument must be a list of strings")
return []
last_step = self.lastHistoryStep
# set defaults for min/max step
if min_step is None:
min_step = 0
if max_step is None:
max_step = last_step + 1
# if the max step is past the actual last step, clamp it down
if max_step > last_step:
max_step = last_step + 1
if keys is None:
return public.HistoryScan(
run=self,
client=self.client,
page_size=page_size,
min_step=min_step,
max_step=max_step,
)
else:
return public.SampledHistoryScan(
run=self,
client=self.client,
keys=keys,
page_size=page_size,
min_step=min_step,
max_step=max_step,
)
@normalize_exceptions
def logged_artifacts(self, per_page: int = 100) -> public.RunArtifacts:
"""Fetches all artifacts logged by this run.
Retrieves all output artifacts that were logged during the run. Returns a
paginated result that can be iterated over or collected into a single list.
Args:
per_page: Number of artifacts to fetch per API request.
Returns:
An iterable collection of all Artifact objects logged as outputs during this run.
Example:
```python
import wandb
import tempfile
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as tmp:
tmp.write("This is a test artifact")
tmp_path = tmp.name
run = wandb.init(project="artifact-example")
artifact = wandb.Artifact("test_artifact", type="dataset")
artifact.add_file(tmp_path)
run.log_artifact(artifact)
run.finish()
api = wandb.Api()
finished_run = api.run(f"{run.entity}/{run.project}/{run.id}")
for logged_artifact in finished_run.logged_artifacts():
print(logged_artifact.name)
```
"""
return public.RunArtifacts(self.client, self, mode="logged", per_page=per_page)
@normalize_exceptions
def used_artifacts(self, per_page: int = 100) -> public.RunArtifacts:
"""Fetches artifacts explicitly used by this run.
Retrieves only the input artifacts that were explicitly declared as used
during the run, typically via `run.use_artifact()`. Returns a paginated
result that can be iterated over or collected into a single list.
Args:
per_page: Number of artifacts to fetch per API request.
Returns:
An iterable collection of Artifact objects explicitly used as inputs in this run.
Example:
```python
import wandb
run = wandb.init(project="artifact-example")
run.use_artifact("test_artifact:latest")
run.finish()
api = wandb.Api()
finished_run = api.run(f"{run.entity}/{run.project}/{run.id}")
for used_artifact in finished_run.used_artifacts():
print(used_artifact.name)
test_artifact
```
"""
return public.RunArtifacts(self.client, self, mode="used", per_page=per_page)
@normalize_exceptions
def use_artifact(self, artifact, use_as=None):
"""Declare an artifact as an input to a run.
Args:
artifact (`Artifact`): An artifact returned from
`wandb.Api().artifact(name)`
use_as (string, optional): A string identifying
how the artifact is used in the script. Used
to easily differentiate artifacts used in a
run, when using the beta wandb launch
feature's artifact swapping functionality.
Returns:
An `Artifact` object.
"""
api = InternalApi(
default_settings={"entity": self.entity, "project": self.project},
retry_timedelta=RETRY_TIMEDELTA,
)
api.set_current_run_id(self.id)
if isinstance(artifact, wandb.Artifact) and not artifact.is_draft():
api.use_artifact(
artifact.id,
use_as=use_as or artifact.name,
artifact_entity_name=artifact.entity,
artifact_project_name=artifact.project,
)
return artifact
elif isinstance(artifact, wandb.Artifact) and artifact.is_draft():
raise ValueError(
"Only existing artifacts are accepted by this api. "
"Manually create one with `wandb artifact put`"
)
else:
raise ValueError("You must pass a wandb.Api().artifact() to use_artifact")
@normalize_exceptions
def log_artifact(
self,
artifact: wandb.Artifact,
aliases: Collection[str] | None = None,
tags: Collection[str] | None = None,
):
"""Declare an artifact as output of a run.
Args:
artifact (`Artifact`): An artifact returned from
`wandb.Api().artifact(name)`.
aliases (list, optional): Aliases to apply to this artifact.
tags: (list, optional) Tags to apply to this artifact, if any.
Returns:
A `Artifact` object.
"""
api = InternalApi(
default_settings={"entity": self.entity, "project": self.project},
retry_timedelta=RETRY_TIMEDELTA,
)
api.set_current_run_id(self.id)
if not isinstance(artifact, wandb.Artifact):
raise TypeError("You must pass a wandb.Api().artifact() to use_artifact")
if artifact.is_draft():
raise ValueError(
"Only existing artifacts are accepted by this api. "
"Manually create one with `wandb artifact put`"
)
if (
self.entity != artifact.source_entity
or self.project != artifact.source_project
):
raise ValueError("A run can't log an artifact to a different project.")
artifact_collection_name = artifact.source_name.split(":")[0]
api.create_artifact(
artifact.type,
artifact_collection_name,
artifact.digest,
aliases=aliases,
tags=tags,
)
return artifact
def load_full_data(self, force: bool = False) -> dict[str, Any]:
"""Load full run data including heavy fields like config, systemMetrics, summaryMetrics.
This method is useful when you initially used lazy=True for listing runs,
but need access to the full data for specific runs.
Args:
force: Force reload even if data is already loaded
Returns:
The loaded run attributes
"""
if not self._lazy and not force:
# Already in full mode, no need to reload
return self._attrs
# Load full data and mark as loaded
result = self._load_with_fragment(RUN_FRAGMENT, RUN_FRAGMENT_NAME, force=True)
self._full_data_loaded = True
return result
@property
def config(self):
"""Get run config. Auto-loads full data if in lazy mode."""
if self._lazy and not self._full_data_loaded and "config" not in self._attrs:
self.load_full_data()
# Ensure config is always converted to dict (defensive against conversion issues)
config_value = self._attrs.get("config", {})
# _convert_to_dict handles dict inputs (noop) and converts str/bytes/bytearray to dict
config_value = _convert_to_dict(config_value)
self._attrs["config"] = config_value
return config_value
@property
def summary(self):
"""Get run summary metrics. Auto-loads full data if in lazy mode."""
if (
self._lazy
and not self._full_data_loaded
and "summaryMetrics" not in self._attrs
):
self.load_full_data()
if self._summary is None:
from wandb.old.summary import HTTPSummary
# TODO: fix the outdir issue
self._summary = HTTPSummary(self, self.client, summary=self.summary_metrics)
return self._summary
@property
def system_metrics(self):
"""Get run system metrics. Auto-loads full data if in lazy mode."""
if (
self._lazy
and not self._full_data_loaded
and "systemMetrics" not in self._attrs
):
self.load_full_data()
# Ensure systemMetrics is always converted to dict (defensive against conversion issues)
system_metrics_value = self._attrs.get("systemMetrics", {})
# _convert_to_dict handles dict inputs (noop) and converts str/bytes/bytearray to dict
system_metrics_value = _convert_to_dict(system_metrics_value)
self._attrs["systemMetrics"] = system_metrics_value
return system_metrics_value
@property
def summary_metrics(self):
"""Get run summary metrics. Auto-loads full data if in lazy mode."""
if (
self._lazy
and not self._full_data_loaded
and "summaryMetrics" not in self._attrs
):
self.load_full_data()
# Ensure summaryMetrics is always converted to dict (defensive against conversion issues)
summary_metrics_value = self._attrs.get("summaryMetrics", {})
# _convert_to_dict handles dict inputs (noop) and converts str/bytes/bytearray to dict
summary_metrics_value = _convert_to_dict(summary_metrics_value)
self._attrs["summaryMetrics"] = summary_metrics_value
return summary_metrics_value
@property
def rawconfig(self):
"""Get raw run config including internal keys. Auto-loads full data if in lazy mode."""
if self._lazy and not self._full_data_loaded and "rawconfig" not in self._attrs:
self.load_full_data()
return self._attrs.get("rawconfig", {})
@property
def sweep_name(self):
"""Get sweep name. Always available since sweepName is in lightweight fragment."""
# sweepName is included in lightweight fragment, so no need to load full data
return self._attrs.get("sweepName")
@property
def path(self):
"""The path of the run. The path is a list containing the entity, project, and run_id."""
return [
urllib.parse.quote_plus(str(self.entity)),
urllib.parse.quote_plus(str(self.project)),
urllib.parse.quote_plus(str(self.id)),
]
@property
def url(self):
"""The URL of the run.
The run URL is generated from the entity, project, and run_id. For
SaaS users, it takes the form of `https://wandb.ai/entity/project/run_id`.
"""
path = self.path
path.insert(2, "runs")
return self.client.app_url + "/".join(path)
@property
def metadata(self):
"""Metadata about the run from wandb-metadata.json.
Metadata includes the run's description, tags, start time, memory
usage and more.
"""
if self._metadata is None:
try:
f = self.file("wandb-metadata.json")
session = self.client._client.transport.session
response = session.get(f.url, timeout=5)
response.raise_for_status()
contents = response.content
self._metadata = json_util.loads(contents)
except: # noqa: E722
# file doesn't exist, or can't be downloaded, or can't be parsed
pass
return self._metadata
@property
def lastHistoryStep(self): # noqa: N802
"""Returns the last step logged in the run's history."""
query = gql(
"""
query RunHistoryKeys($project: String!, $entity: String!, $name: String!) {
project(name: $project, entityName: $entity) {
run(name: $name) { historyKeys }
}
}
"""
)
response = self._exec(query)
if (
response is None
or response.get("project") is None
or response["project"].get("run") is None
or response["project"]["run"].get("historyKeys") is None
):
return -1
history_keys = response["project"]["run"]["historyKeys"]
return history_keys["lastStep"] if "lastStep" in history_keys else -1
def to_html(self, height=420, hidden=False):
"""Generate HTML containing an iframe displaying this run."""
url = self.url + "?jupyter=true"
style = f"border:none;width:100%;height:{height}px;"
prefix = ""
if hidden:
style += "display:none;"
prefix = ipython.toggle_button()
return prefix + f"<iframe src={url!r} style={style!r}></iframe>"
def _repr_html_(self) -> str:
return self.to_html()
def __repr__(self):
return "<Run {} ({})>".format("/".join(self.path), self.state)
def _beta_scan_history(
self,
keys: list[str] | None = None,
page_size=1000,
min_step=0,
max_step=None,
) -> public.BetaHistoryScan:
"""Returns an iterable collection of all history records for a run.
This function is still in development and may not work as expected.
It uses wandb-core to read history from a run's exported
parquet history locally.
Args:
keys: list of metrics to read from the run's history.
if no keys are provided then all metrics will be returned.
page_size: the number of history records to read at a time.
min_step: The minimum step to start reading history from (inclusive).
max_step: The maximum step to read history up to (exclusive).
Returns:
A BetaHistoryScan object,
which can be iterator over to get history records.
"""
if self._api is None:
self._api = public.Api()
beta_history_scan = public.BetaHistoryScan(
api=self._api,
run=self,
min_step=min_step,
max_step=max_step or self.lastHistoryStep + 1,
keys=keys,
page_size=page_size,
)
return beta_history_scan
|
Run
|
python
|
pytorch__pytorch
|
torch/ao/quantization/quantizer/quantizer.py
|
{
"start": 2188,
"end": 2857
}
|
class ____(QuantizationSpecBase):
dtype: torch.dtype
scale: float
zero_point: int
quant_min: int | None = None
quant_max: int | None = None
qscheme: torch.qscheme | None = None
is_dynamic: bool = False
"""
The way we refer to other points of quantization in the graph will be either
an input edge or an output value
input edge is the connection between input node and the node consuming the input, so it's a Tuple[Node, Node]
output value is an fx Node
"""
EdgeOrNode = Annotated[tuple[Node, Node] | Node, None]
EdgeOrNode.__module__ = "torch.ao.quantization.quantizer.quantizer"
@dataclass(eq=True, frozen=True)
|
FixedQParamsQuantizationSpec
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
|
{
"start": 1776,
"end": 3067
}
|
class ____(KeyValueParser):
"""Composite argument parser for controller key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
versions = get_controller_pythons(state.root_namespace.controller, False)
allow_default = bool(get_controller_pythons(state.root_namespace.controller, True))
allow_venv = isinstance(state.root_namespace.controller, OriginConfig) or not state.root_namespace.controller
return dict(
python=PythonParser(versions=versions, allow_venv=allow_venv, allow_default=allow_default),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section_name = 'controller options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' python={PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=True).document(state)} # non-origin controller',
f' python={PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=True, allow_default=True).document(state)} # origin controller',
])
return f'{{{section_name}}} # default'
|
ControllerKeyValueParser
|
python
|
scipy__scipy
|
scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
|
{
"start": 1031,
"end": 3990
}
|
class ____(TestCase):
def test_2d_sphere_constraints(self):
# Interior initial point
ta, tb, intersect = sphere_intersections([0, 0],
[1, 0], 0.5)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# No intersection between line and circle
ta, tb, intersect = sphere_intersections([2, 0],
[0, 1], 1)
assert_equal(intersect, False)
# Outside initial point pointing toward outside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 1)
assert_equal(intersect, False)
# Outside initial point pointing toward inside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[-1, 0], 1.5)
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
# Initial point on the boundary
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 2)
assert_array_almost_equal([ta, tb], [0, 0])
assert_equal(intersect, True)
def test_2d_sphere_constraints_line_intersections(self):
# Interior initial point
ta, tb, intersect = sphere_intersections([0, 0],
[1, 0], 0.5,
entire_line=True)
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
assert_equal(intersect, True)
# No intersection between line and circle
ta, tb, intersect = sphere_intersections([2, 0],
[0, 1], 1,
entire_line=True)
assert_equal(intersect, False)
# Outside initial point pointing toward outside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 1,
entire_line=True)
assert_array_almost_equal([ta, tb], [-3, -1])
assert_equal(intersect, True)
# Outside initial point pointing toward inside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[-1, 0], 1.5,
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, 3.5])
assert_equal(intersect, True)
# Initial point on the boundary
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [-4, 0])
assert_equal(intersect, True)
|
TestSphericalBoundariesIntersections
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-cassandra/tests/test_cassandra.py
|
{
"start": 415,
"end": 3672
}
|
class ____(unittest.TestCase):
@pytest.mark.skipif(not has_cassio, reason="cassio not installed")
def test_cassandra_create_and_crud(self) -> None:
mock_db_session = MagicMock()
try:
import cassio # noqa
except ModuleNotFoundError:
# mock `cassio` if not installed
mock_cassio = MagicMock()
sys.modules["cassio"] = mock_cassio
#
vector_store = CassandraVectorStore(
table="table",
embedding_dimension=2,
session=mock_db_session,
keyspace="keyspace",
ttl_seconds=123,
)
vector_store.add(
[
TextNode(
text="test node text",
id_="test node id",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc id")
},
embedding=[0.5, 0.5],
)
]
)
vector_store.delete("test node id")
vector_store.client
@pytest.mark.skipif(not has_cassio, reason="cassio not installed")
def test_cassandra_queries(self) -> None:
mock_db_session = MagicMock()
try:
import cassio # noqa
except ModuleNotFoundError:
# mock `cassio` if not installed
mock_cassio = MagicMock()
sys.modules["cassio"] = mock_cassio
#
vector_store = CassandraVectorStore(
table="table",
embedding_dimension=2,
session=mock_db_session,
keyspace="keyspace",
ttl_seconds=123,
)
# q1: default
query = VectorStoreQuery(
query_embedding=[1, 1],
similarity_top_k=3,
mode=VectorStoreQueryMode.DEFAULT,
)
vector_store.query(
query,
)
# q2: mmr, threshold in query takes precedence
query = VectorStoreQuery(
query_embedding=[1, 1],
similarity_top_k=3,
mode=VectorStoreQueryMode.MMR,
mmr_threshold=0.45,
)
vector_store.query(
query,
mmr_threshold=0.9,
)
# q3: mmr, threshold defined as param to `query`
query = VectorStoreQuery(
query_embedding=[1, 1],
similarity_top_k=3,
mode=VectorStoreQueryMode.MMR,
)
vector_store.query(
query,
mmr_threshold=0.9,
)
# q4: mmr, prefetch control
query = VectorStoreQuery(
query_embedding=[1, 1],
similarity_top_k=3,
mode=VectorStoreQueryMode.MMR,
)
vector_store.query(
query,
mmr_prefetch_factor=7.7,
)
# q5: mmr, conflicting prefetch control directives
query = VectorStoreQuery(
query_embedding=[1, 1],
similarity_top_k=3,
mode=VectorStoreQueryMode.MMR,
)
with pytest.raises(ValueError):
vector_store.query(
query,
mmr_prefetch_factor=7.7,
mmr_prefetch_k=80,
)
|
TestCassandraVectorStore
|
python
|
numpy__numpy
|
numpy/distutils/tests/test_misc_util.py
|
{
"start": 292,
"end": 1459
}
|
class ____:
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
def test_2(self):
assert_equal(appendpath('prefix/sub', 'name'),
join('prefix', 'sub', 'name'))
assert_equal(appendpath('prefix/sub', 'sup/name'),
join('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub', '/prefix/name'),
ajoin('prefix', 'sub', 'name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
ajoin('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
|
TestAppendpath
|
python
|
pypa__setuptools
|
setuptools/_distutils/filelist.py
|
{
"start": 408,
"end": 11942
}
|
class ____:
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
Instance attributes:
dir
directory from which files will be taken -- only used if
'allfiles' not supplied to constructor
files
list of filenames currently being built/filtered/manipulated
allfiles
complete list of files under consideration (ie. without any
filtering applied)
"""
def __init__(self, warn: object = None, debug_print: object = None) -> None:
# ignore argument to FileList, but keep them for backwards
# compatibility
self.allfiles: Iterable[str] | None = None
self.files: list[str] = []
def set_allfiles(self, allfiles: Iterable[str]) -> None:
self.allfiles = allfiles
def findall(self, dir: str | os.PathLike[str] = os.curdir) -> None:
self.allfiles = findall(dir)
def debug_print(self, msg: object) -> None:
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print(msg)
# Collection methods
def append(self, item: str) -> None:
self.files.append(item)
def extend(self, items: Iterable[str]) -> None:
self.files.extend(items)
def sort(self) -> None:
# Not a strict lexical sort!
sortable_files = sorted(map(os.path.split, self.files))
self.files = []
for sort_tuple in sortable_files:
self.files.append(os.path.join(*sort_tuple))
# Other miscellaneous utility methods
def remove_duplicates(self) -> None:
# Assumes list has been sorted!
for i in range(len(self.files) - 1, 0, -1):
if self.files[i] == self.files[i - 1]:
del self.files[i]
# "File template" methods
def _parse_template_line(self, line):
words = line.split()
action = words[0]
patterns = dir = dir_pattern = None
if action in ('include', 'exclude', 'global-include', 'global-exclude'):
if len(words) < 2:
raise DistutilsTemplateError(
f"'{action}' expects <pattern1> <pattern2> ..."
)
patterns = [convert_path(w) for w in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistutilsTemplateError(
f"'{action}' expects <dir> <pattern1> <pattern2> ..."
)
dir = convert_path(words[1])
patterns = [convert_path(w) for w in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistutilsTemplateError(
f"'{action}' expects a single <dir_pattern>"
)
dir_pattern = convert_path(words[1])
else:
raise DistutilsTemplateError(f"unknown action '{action}'")
return (action, patterns, dir, dir_pattern)
def process_template_line(self, line: str) -> None: # noqa: C901
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dir_pattern).
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
self.debug_print("include " + ' '.join(patterns))
for pattern in patterns:
if not self.include_pattern(pattern, anchor=True):
log.warning("warning: no files found matching '%s'", pattern)
elif action == 'exclude':
self.debug_print("exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude_pattern(pattern, anchor=True):
log.warning(
"warning: no previously-included files found matching '%s'",
pattern,
)
elif action == 'global-include':
self.debug_print("global-include " + ' '.join(patterns))
for pattern in patterns:
if not self.include_pattern(pattern, anchor=False):
log.warning(
(
"warning: no files found matching '%s' "
"anywhere in distribution"
),
pattern,
)
elif action == 'global-exclude':
self.debug_print("global-exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude_pattern(pattern, anchor=False):
log.warning(
(
"warning: no previously-included files matching "
"'%s' found anywhere in distribution"
),
pattern,
)
elif action == 'recursive-include':
self.debug_print("recursive-include {} {}".format(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.include_pattern(pattern, prefix=dir):
msg = "warning: no files found matching '%s' under directory '%s'"
log.warning(msg, pattern, dir)
elif action == 'recursive-exclude':
self.debug_print("recursive-exclude {} {}".format(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.exclude_pattern(pattern, prefix=dir):
log.warning(
(
"warning: no previously-included files matching "
"'%s' found under directory '%s'"
),
pattern,
dir,
)
elif action == 'graft':
self.debug_print("graft " + dir_pattern)
if not self.include_pattern(None, prefix=dir_pattern):
log.warning("warning: no directories found matching '%s'", dir_pattern)
elif action == 'prune':
self.debug_print("prune " + dir_pattern)
if not self.exclude_pattern(None, prefix=dir_pattern):
log.warning(
("no previously-included directories found matching '%s'"),
dir_pattern,
)
else:
raise DistutilsInternalError(
f"this cannot happen: invalid action '{action}'"
)
# Filtering/selection methods
@overload
def include_pattern(
self,
pattern: str,
anchor: bool = True,
prefix: str | None = None,
is_regex: Literal[False] = False,
) -> bool: ...
@overload
def include_pattern(
self,
pattern: str | re.Pattern[str],
anchor: bool = True,
prefix: str | None = None,
*,
is_regex: Literal[True],
) -> bool: ...
@overload
def include_pattern(
self,
pattern: str | re.Pattern[str],
anchor: bool,
prefix: str | None,
is_regex: Literal[True],
) -> bool: ...
def include_pattern(
self,
pattern: str | re.Pattern,
anchor: bool = True,
prefix: str | None = None,
is_regex: bool = False,
) -> bool:
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
are not quite the same as implemented by the 'fnmatch' module: '*'
and '?' match non-special characters, where "special" is platform-
dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found, False otherwise.
"""
# XXX docstring lying about what the special chars are?
files_found = False
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
self.debug_print(f"include_pattern: applying regex r'{pattern_re.pattern}'")
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.debug_print(" adding " + name)
self.files.append(name)
files_found = True
return files_found
@overload
def exclude_pattern(
self,
pattern: str,
anchor: bool = True,
prefix: str | None = None,
is_regex: Literal[False] = False,
) -> bool: ...
@overload
def exclude_pattern(
self,
pattern: str | re.Pattern[str],
anchor: bool = True,
prefix: str | None = None,
*,
is_regex: Literal[True],
) -> bool: ...
@overload
def exclude_pattern(
self,
pattern: str | re.Pattern[str],
anchor: bool,
prefix: str | None,
is_regex: Literal[True],
) -> bool: ...
def exclude_pattern(
self,
pattern: str | re.Pattern,
anchor: bool = True,
prefix: str | None = None,
is_regex: bool = False,
) -> bool:
"""Remove strings (presumably filenames) from 'files' that match
'pattern'. Other parameters are the same as for
'include_pattern()', above.
The list 'self.files' is modified in place.
Return True if files are found, False otherwise.
"""
files_found = False
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
self.debug_print(f"exclude_pattern: applying regex r'{pattern_re.pattern}'")
for i in range(len(self.files) - 1, -1, -1):
if pattern_re.search(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
files_found = True
return files_found
# Utility functions
def _find_all_simple(path):
"""
Find all files under 'path'
"""
all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True))
results = (
os.path.join(base, file) for base, dirs, files in all_unique for file in files
)
return filter(os.path.isfile, results)
|
FileList
|
python
|
plotly__plotly.py
|
plotly/graph_objs/surface/_lightposition.py
|
{
"start": 233,
"end": 3494
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "surface"
_path_str = "surface.lightposition"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
Numeric vector, representing the X coordinate for each vertex.
The 'x' property is a number and may be specified as:
- An int or float in the interval [-100000, 100000]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Numeric vector, representing the Y coordinate for each vertex.
The 'y' property is a number and may be specified as:
- An int or float in the interval [-100000, 100000]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
Numeric vector, representing the Z coordinate for each vertex.
The 'z' property is a number and may be specified as:
- An int or float in the interval [-100000, 100000]
Returns
-------
int|float
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
Numeric vector, representing the X coordinate for each
vertex.
y
Numeric vector, representing the Y coordinate for each
vertex.
z
Numeric vector, representing the Z coordinate for each
vertex.
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Lightposition object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.Lightposition`
x
Numeric vector, representing the X coordinate for each
vertex.
y
Numeric vector, representing the Y coordinate for each
vertex.
z
Numeric vector, representing the Z coordinate for each
vertex.
Returns
-------
Lightposition
"""
super().__init__("lightposition")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.surface.Lightposition
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.Lightposition`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Lightposition
|
python
|
mlflow__mlflow
|
mlflow/entities/multipart_upload.py
|
{
"start": 295,
"end": 727
}
|
class ____:
part_number: int
etag: str
url: str | None = None
@classmethod
def from_proto(cls, proto):
return cls(
proto.part_number,
proto.etag or None,
proto.url or None,
)
def to_dict(self):
return {
"part_number": self.part_number,
"etag": self.etag,
"url": self.url,
}
@dataclass
|
MultipartUploadPart
|
python
|
django__django
|
tests/utils_tests/test_choices.py
|
{
"start": 3380,
"end": 14017
}
|
class ____(SimpleTestCase):
expected = [
("C", _("Club")),
("D", _("Diamond")),
("H", _("Heart")),
("S", _("Spade")),
]
expected_nested = [
("Audio", [("vinyl", _("Vinyl")), ("cd", _("CD"))]),
("Video", [("vhs", _("VHS Tape")), ("dvd", _("DVD"))]),
("unknown", _("Unknown")),
]
invalid = [
1j,
123,
123.45,
"invalid",
b"invalid",
_("invalid"),
object(),
None,
True,
False,
]
invalid_iterable = [
# Special cases of a string-likes which would unpack incorrectly.
["ab"],
[b"ab"],
[_("ab")],
# Non-iterable items or iterable items with incorrect number of
# elements that cannot be unpacked.
[123],
[("value",)],
[("value", "label", "other")],
]
invalid_nested = [
# Nested choices can only be two-levels deep, so return callables,
# mappings, iterables, etc. at deeper levels unmodified.
[("Group", [("Value", lambda: "Label")])],
[("Group", [("Value", {"Label 1?": "Label 2?"})])],
[("Group", [("Value", [("Label 1?", "Label 2?")])])],
]
def test_empty(self):
def generator():
yield from ()
for choices in ({}, [], (), set(), frozenset(), generator()):
with self.subTest(choices=choices):
self.assertEqual(normalize_choices(choices), [])
def test_choices(self):
class Medal(TextChoices):
GOLD = "GOLD", _("Gold")
SILVER = "SILVER", _("Silver")
BRONZE = "BRONZE", _("Bronze")
expected = [
("GOLD", _("Gold")),
("SILVER", _("Silver")),
("BRONZE", _("Bronze")),
]
self.assertEqual(normalize_choices(Medal), expected)
def test_callable(self):
def get_choices():
return {
"C": _("Club"),
"D": _("Diamond"),
"H": _("Heart"),
"S": _("Spade"),
}
get_choices_spy = mock.Mock(wraps=get_choices)
output = normalize_choices(get_choices_spy)
get_choices_spy.assert_not_called()
self.assertIsInstance(output, CallableChoiceIterator)
self.assertEqual(output, self.expected)
get_choices_spy.assert_called_once()
def test_mapping(self):
choices = {
"C": _("Club"),
"D": _("Diamond"),
"H": _("Heart"),
"S": _("Spade"),
}
self.assertEqual(normalize_choices(choices), self.expected)
def test_iterable(self):
choices = [
("C", _("Club")),
("D", _("Diamond")),
("H", _("Heart")),
("S", _("Spade")),
]
self.assertEqual(normalize_choices(choices), self.expected)
def test_iterator(self):
def generator():
yield "C", _("Club")
yield "D", _("Diamond")
yield "H", _("Heart")
yield "S", _("Spade")
choices = generator()
self.assertEqual(normalize_choices(choices), self.expected)
def test_nested_callable(self):
def get_audio_choices():
return [("vinyl", _("Vinyl")), ("cd", _("CD"))]
def get_video_choices():
return [("vhs", _("VHS Tape")), ("dvd", _("DVD"))]
def get_media_choices():
return [
("Audio", get_audio_choices),
("Video", get_video_choices),
("unknown", _("Unknown")),
]
get_media_choices_spy = mock.Mock(wraps=get_media_choices)
output = normalize_choices(get_media_choices_spy)
get_media_choices_spy.assert_not_called()
self.assertIsInstance(output, CallableChoiceIterator)
self.assertEqual(output, self.expected_nested)
get_media_choices_spy.assert_called_once()
def test_nested_mapping(self):
choices = {
"Audio": {"vinyl": _("Vinyl"), "cd": _("CD")},
"Video": {"vhs": _("VHS Tape"), "dvd": _("DVD")},
"unknown": _("Unknown"),
}
self.assertEqual(normalize_choices(choices), self.expected_nested)
def test_nested_iterable(self):
choices = [
("Audio", [("vinyl", _("Vinyl")), ("cd", _("CD"))]),
("Video", [("vhs", _("VHS Tape")), ("dvd", _("DVD"))]),
("unknown", _("Unknown")),
]
self.assertEqual(normalize_choices(choices), self.expected_nested)
def test_nested_iterator(self):
def generate_audio_choices():
yield "vinyl", _("Vinyl")
yield "cd", _("CD")
def generate_video_choices():
yield "vhs", _("VHS Tape")
yield "dvd", _("DVD")
def generate_media_choices():
yield "Audio", generate_audio_choices()
yield "Video", generate_video_choices()
yield "unknown", _("Unknown")
choices = generate_media_choices()
self.assertEqual(normalize_choices(choices), self.expected_nested)
def test_callable_non_canonical(self):
# Canonical form is list of 2-tuple, but nested lists should work.
def get_choices():
return [
["C", _("Club")],
["D", _("Diamond")],
["H", _("Heart")],
["S", _("Spade")],
]
get_choices_spy = mock.Mock(wraps=get_choices)
output = normalize_choices(get_choices_spy)
get_choices_spy.assert_not_called()
self.assertIsInstance(output, CallableChoiceIterator)
self.assertEqual(output, self.expected)
get_choices_spy.assert_called_once()
def test_iterable_non_canonical(self):
# Canonical form is list of 2-tuple, but nested lists should work.
choices = [
["C", _("Club")],
["D", _("Diamond")],
["H", _("Heart")],
["S", _("Spade")],
]
self.assertEqual(normalize_choices(choices), self.expected)
def test_iterator_non_canonical(self):
# Canonical form is list of 2-tuple, but nested lists should work.
def generator():
yield ["C", _("Club")]
yield ["D", _("Diamond")]
yield ["H", _("Heart")]
yield ["S", _("Spade")]
choices = generator()
self.assertEqual(normalize_choices(choices), self.expected)
def test_nested_callable_non_canonical(self):
# Canonical form is list of 2-tuple, but nested lists should work.
def get_audio_choices():
return [["vinyl", _("Vinyl")], ["cd", _("CD")]]
def get_video_choices():
return [["vhs", _("VHS Tape")], ["dvd", _("DVD")]]
def get_media_choices():
return [
["Audio", get_audio_choices],
["Video", get_video_choices],
["unknown", _("Unknown")],
]
get_media_choices_spy = mock.Mock(wraps=get_media_choices)
output = normalize_choices(get_media_choices_spy)
get_media_choices_spy.assert_not_called()
self.assertIsInstance(output, CallableChoiceIterator)
self.assertEqual(output, self.expected_nested)
get_media_choices_spy.assert_called_once()
def test_nested_iterable_non_canonical(self):
# Canonical form is list of 2-tuple, but nested lists should work.
choices = [
["Audio", [["vinyl", _("Vinyl")], ["cd", _("CD")]]],
["Video", [["vhs", _("VHS Tape")], ["dvd", _("DVD")]]],
["unknown", _("Unknown")],
]
self.assertEqual(normalize_choices(choices), self.expected_nested)
def test_nested_iterator_non_canonical(self):
# Canonical form is list of 2-tuple, but nested lists should work.
def generator():
yield ["Audio", [["vinyl", _("Vinyl")], ["cd", _("CD")]]]
yield ["Video", [["vhs", _("VHS Tape")], ["dvd", _("DVD")]]]
yield ["unknown", _("Unknown")]
choices = generator()
self.assertEqual(normalize_choices(choices), self.expected_nested)
def test_nested_mixed_mapping_and_iterable(self):
# Although not documented, as it's better to stick to either mappings
# or iterables, nesting of mappings within iterables and vice versa
# works and is likely to occur in the wild. This is supported by the
# recursive call to `normalize_choices()` which will normalize nested
# choices.
choices = {
"Audio": [("vinyl", _("Vinyl")), ("cd", _("CD"))],
"Video": [("vhs", _("VHS Tape")), ("dvd", _("DVD"))],
"unknown": _("Unknown"),
}
self.assertEqual(normalize_choices(choices), self.expected_nested)
choices = [
("Audio", {"vinyl": _("Vinyl"), "cd": _("CD")}),
("Video", {"vhs": _("VHS Tape"), "dvd": _("DVD")}),
("unknown", _("Unknown")),
]
self.assertEqual(normalize_choices(choices), self.expected_nested)
def test_iterable_set(self):
# Although not documented, as sets are unordered which results in
# randomised order in form fields, passing a set of 2-tuples works.
# Consistent ordering of choices on model fields in migrations is
# enforced by the migrations serializer.
choices = {
("C", _("Club")),
("D", _("Diamond")),
("H", _("Heart")),
("S", _("Spade")),
}
self.assertEqual(sorted(normalize_choices(choices)), sorted(self.expected))
def test_unsupported_values_returned_unmodified(self):
# Unsupported values must be returned unmodified for model system check
# to work correctly.
for value in self.invalid + self.invalid_iterable + self.invalid_nested:
with self.subTest(value=value):
self.assertEqual(normalize_choices(value), value)
def test_unsupported_values_from_callable_returned_unmodified(self):
for value in self.invalid_iterable + self.invalid_nested:
with self.subTest(value=value):
self.assertEqual(normalize_choices(lambda: value), value)
def test_unsupported_values_from_iterator_returned_unmodified(self):
for value in self.invalid_nested:
with self.subTest(value=value):
self.assertEqual(
normalize_choices((lambda: (yield from value))()),
value,
)
|
NormalizeFieldChoicesTests
|
python
|
scikit-learn__scikit-learn
|
sklearn/exceptions.py
|
{
"start": 6176,
"end": 7703
}
|
class ____(UserWarning):
"""Warning raised when an estimator check from the common tests fails.
Parameters
----------
estimator : estimator object
Estimator instance for which the test failed.
check_name : str
Name of the check that failed.
exception : Exception
Exception raised by the failed check.
status : str
Status of the check.
expected_to_fail : bool
Whether the check was expected to fail.
expected_to_fail_reason : str
Reason for the expected failure.
"""
def __init__(
self,
*,
estimator,
check_name: str,
exception: Exception,
status: str,
expected_to_fail: bool,
expected_to_fail_reason: str,
):
self.estimator = estimator
self.check_name = check_name
self.exception = exception
self.status = status
self.expected_to_fail = expected_to_fail
self.expected_to_fail_reason = expected_to_fail_reason
def __repr__(self):
expected_to_fail_str = (
f"Expected to fail: {self.expected_to_fail_reason}"
if self.expected_to_fail
else "Not expected to fail"
)
return (
f"Test {self.check_name} failed for estimator {self.estimator!r}.\n"
f"Expected to fail reason: {expected_to_fail_str}\n"
f"Exception: {self.exception}"
)
def __str__(self):
return self.__repr__()
|
EstimatorCheckFailedWarning
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/events.py
|
{
"start": 153627,
"end": 158319
}
|
class ____(Request):
"""
Get the image for the next variant for the same iteration or for the next iteration
:param task: Task ID
:type task: str
:param scroll_id: Scroll ID from the previous call to get_debug_image_sample
:type scroll_id: str
:param navigate_earlier: If set then get the either previous variant event from
the current iteration or (if does not exist) the last variant event from the
previous iteration. Otherwise next variant event from the current iteration or
first variant event from the next iteration
:type navigate_earlier: bool
:param next_iteration: If set then navigate to the next/previous iteration
:type next_iteration: bool
:param model_events: If set then the retrieving model debug images. Otherwise
task debug images
:type model_events: bool
"""
_service = "events"
_action = "next_debug_image_sample"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"model_events": {
"default": False,
"description": "If set then the retrieving model debug images. Otherwise task debug images",
"type": "boolean",
},
"navigate_earlier": {
"description": "If set then get the either previous variant event from the current iteration or (if does not exist) the last variant event from the previous iteration.\n Otherwise next variant event from the current iteration or first variant event from the next iteration",
"type": "boolean",
},
"next_iteration": {
"default": False,
"description": "If set then navigate to the next/previous iteration",
"type": "boolean",
},
"scroll_id": {
"description": "Scroll ID from the previous call to get_debug_image_sample",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "scroll_id"],
"type": "object",
}
def __init__(
self,
task: str,
scroll_id: str,
navigate_earlier: Optional[bool] = None,
next_iteration: Optional[bool] = False,
model_events: Optional[bool] = False,
**kwargs: Any
) -> None:
super(NextDebugImageSampleRequest, self).__init__(**kwargs)
self.task = task
self.scroll_id = scroll_id
self.navigate_earlier = navigate_earlier
self.next_iteration = next_iteration
self.model_events = model_events
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("scroll_id")
def scroll_id(self) -> str:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: str) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("navigate_earlier")
def navigate_earlier(self) -> Optional[bool]:
return self._property_navigate_earlier
@navigate_earlier.setter
def navigate_earlier(self, value: Optional[bool]) -> None:
if value is None:
self._property_navigate_earlier = None
return
self.assert_isinstance(value, "navigate_earlier", (bool,))
self._property_navigate_earlier = value
@schema_property("next_iteration")
def next_iteration(self) -> Optional[bool]:
return self._property_next_iteration
@next_iteration.setter
def next_iteration(self, value: Optional[bool]) -> None:
if value is None:
self._property_next_iteration = None
return
self.assert_isinstance(value, "next_iteration", (bool,))
self._property_next_iteration = value
@schema_property("model_events")
def model_events(self) -> Optional[bool]:
return self._property_model_events
@model_events.setter
def model_events(self, value: Optional[bool]) -> None:
if value is None:
self._property_model_events = None
return
self.assert_isinstance(value, "model_events", (bool,))
self._property_model_events = value
|
NextDebugImageSampleRequest
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py
|
{
"start": 1198,
"end": 1489
}
|
class ____(serializers.Serializer):
client_id = serializers.CharField(required=True, allow_null=False)
refresh_token = serializers.CharField(required=True, allow_null=False)
grant_type = serializers.CharField(required=True, allow_null=False)
|
SentryAppRefreshAuthorizationSerializer
|
python
|
getsentry__sentry
|
src/sentry/sentry_metrics/consumers/indexer/tags_validator.py
|
{
"start": 113,
"end": 810
}
|
class ____:
"""
This class is used to enforce the limits on tags that are received by the indexer.
"""
MAX_TAG_KEY_LENGTH = MAX_INDEXED_COLUMN_LENGTH
MAX_TAG_VALUE_LENGTH = MAX_INDEXED_COLUMN_LENGTH
def is_allowed(self, tags: Mapping[str, str] | None) -> bool:
"""
Returns True if the tags key value pairs are within limits.
"""
if tags is None:
return True
for key, value in tags.items():
if key is None or len(key) > self.MAX_TAG_KEY_LENGTH:
return False
if value is None or len(value) > self.MAX_TAG_VALUE_LENGTH:
return False
return True
|
TagsValidator
|
python
|
scrapy__scrapy
|
tests/CrawlerProcess/twisted_reactor_custom_settings_conflict.py
|
{
"start": 267,
"end": 626
}
|
class ____(scrapy.Spider):
name = "asyncio_reactor"
custom_settings = {
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
}
process = CrawlerProcess()
d1 = process.crawl(SelectReactorSpider)
d1.addErrback(log.err)
d2 = process.crawl(AsyncioReactorSpider)
d2.addErrback(log.err)
process.start()
|
AsyncioReactorSpider
|
python
|
huggingface__transformers
|
src/transformers/models/cohere2_vision/modular_cohere2_vision.py
|
{
"start": 13737,
"end": 14460
}
|
class ____(GotOcr2ImageProcessorFast):
size = {"height": 512, "width": 512}
min_patches = 1
max_patches = 12
crop_to_patches = True
patch_size = 16
valid_kwargs = Cohere2VisionFastImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Cohere2VisionFastImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Cohere2VisionFastImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
__all__ = [
"Cohere2VisionForConditionalGeneration",
"Cohere2VisionPreTrainedModel",
"Cohere2VisionModel",
"Cohere2VisionImageProcessorFast",
]
|
Cohere2VisionImageProcessorFast
|
python
|
getsentry__sentry
|
src/sentry/models/activity.py
|
{
"start": 3507,
"end": 7793
}
|
class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
project = FlexibleForeignKey("sentry.Project")
group = FlexibleForeignKey("sentry.Group", null=True)
# index on (type, ident)
type: models.Field[int | ActivityType, int] = BoundedPositiveIntegerField(choices=CHOICES)
ident = models.CharField(max_length=64, null=True)
# if the user is not set, it's assumed to be the system
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
datetime = models.DateTimeField(default=timezone.now)
data = LegacyTextJSONField(default=dict, null=True)
objects: ClassVar[ActivityManager] = ActivityManager()
class Meta:
app_label = "sentry"
db_table = "sentry_activity"
indexes = (models.Index(fields=("project", "datetime")),)
__repr__ = sane_repr("project_id", "group_id", "event_id", "user_id", "type", "ident")
@staticmethod
def get_version_ident(version: str | None) -> str:
return (version or "")[:64]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
from sentry.models.release import Release
# XXX(dcramer): fix for bad data
if self.type in (ActivityType.RELEASE.value, ActivityType.DEPLOY.value) and isinstance(
self.data["version"], Release
):
self.data["version"] = self.data["version"].version
if self.type == ActivityType.ASSIGNED.value:
self.data["assignee"] = str(self.data["assignee"])
def save(self, *args: Any, **kwargs: Any) -> None:
created = bool(not self.id)
super().save(*args, **kwargs)
# The receiver for the post_save signal was not working in production, so just execute directly and safely
try:
from sentry.integrations.slack.tasks.send_notifications_on_activity import (
activity_created_receiver,
)
activity_created_receiver(self, created)
except Exception as err:
_default_logger.info(
"there was an error trying to kick off activity receiver",
exc_info=err,
extra={
"activity_id": self.id,
},
)
pass
if not created:
return
# HACK: support Group.num_comments
if self.type == ActivityType.NOTE.value and self.group is not None:
from sentry.models.group import Group
self.group.update(num_comments=F("num_comments") + 1)
if not options.get("groups.enable-post-update-signal"):
post_save.send_robust(
sender=Group, instance=self.group, created=True, update_fields=["num_comments"]
)
def delete(self, *args: Any, **kwargs: Any) -> tuple[int, dict[str, int]]:
result = super().delete(*args, **kwargs)
# HACK: support Group.num_comments
if self.type == ActivityType.NOTE.value and self.group is not None:
from sentry.models.group import Group
self.group.update(num_comments=F("num_comments") - 1)
if not options.get("groups.enable-post-update-signal"):
post_save.send_robust(
sender=Group, instance=self.group, created=True, update_fields=["num_comments"]
)
return result
def send_notification(self) -> None:
if self.group:
group_type = get_group_type_by_type_id(self.group.type)
has_status_change_notifications = group_type.enable_status_change_workflow_notifications
has_workflow_notifications = group_type.enable_workflow_notifications
is_status_change = self.type in {
activity.value for activity in STATUS_CHANGE_ACTIVITY_TYPES
}
# Skip sending the activity notification if the group type does not
# support status change workflow notifications
if (
is_status_change
and not has_status_change_notifications
or not has_workflow_notifications
):
return
activity.send_activity_notifications.delay(self.id)
|
Activity
|
python
|
langchain-ai__langchain
|
libs/partners/openai/langchain_openai/chat_models/base.py
|
{
"start": 87169,
"end": 137907
}
|
class ____(BaseChatOpenAI): # type: ignore[override]
r"""Interface to OpenAI chat model APIs.
???+ info "Setup"
Install `langchain-openai` and set environment variable `OPENAI_API_KEY`.
```bash
pip install -U langchain-openai
# or using uv
uv add langchain-openai
```
```bash
export OPENAI_API_KEY="your-api-key"
```
??? info "Key init args — completion params"
| Param | Type | Description |
| ------------------- | ------------- | ----------------------------------------------------------------------------------------------------------- |
| `model` | `str` | Name of OpenAI model to use. |
| `temperature` | `float` | Sampling temperature. |
| `max_tokens` | `int | None` | Max number of tokens to generate. |
| `logprobs` | `bool | None` | Whether to return logprobs. |
| `stream_options` | `dict` | Configure streaming outputs, like whether to return token usage when streaming (`{"include_usage": True}`). |
| `use_responses_api` | `bool | None` | Whether to use the responses API. |
See full list of supported init args and their descriptions below.
??? info "Key init args — client params"
| Param | Type | Description |
| -------------- | ------------------------------------------ | ----------------------------------------------------------------------------------- |
| `timeout` | `float | Tuple[float, float] | Any | None` | Timeout for requests. |
| `max_retries` | `int | None` | Max number of retries. |
| `api_key` | `str | None` | OpenAI API key. If not passed in will be read from env var `OPENAI_API_KEY`. |
| `base_url` | `str | None` | Base URL for API requests. Only specify if using a proxy or service emulator. |
| `organization` | `str | None` | OpenAI organization ID. If not passed in will be read from env var `OPENAI_ORG_ID`. |
See full list of supported init args and their descriptions below.
??? info "Instantiate"
Create a model instance with desired params. For example:
```python
from langchain_openai import ChatOpenAI
model = ChatOpenAI(
model="...",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# api_key="...",
# base_url="...",
# organization="...",
# other params...
)
```
See all available params below.
!!! tip "Preserved params"
Any param which is not explicitly supported will be passed directly to
[`openai.OpenAI.chat.completions.create(...)`](https://platform.openai.com/docs/api-reference/chat/create)
every time to the model is invoked. For example:
```python
from langchain_openai import ChatOpenAI
import openai
ChatOpenAI(..., frequency_penalty=0.2).invoke(...)
# Results in underlying API call of:
openai.OpenAI(..).chat.completions.create(..., frequency_penalty=0.2)
# Which is also equivalent to:
ChatOpenAI(...).invoke(..., frequency_penalty=0.2)
```
??? info "Invoke"
Generate a response from the model:
```python
messages = [
(
"system",
"You are a helpful translator. Translate the user sentence to French.",
),
("human", "I love programming."),
]
model.invoke(messages)
```
Results in an `AIMessage` response:
```python
AIMessage(
content="J'adore la programmation.",
response_metadata={
"token_usage": {
"completion_tokens": 5,
"prompt_tokens": 31,
"total_tokens": 36,
},
"model_name": "gpt-4o",
"system_fingerprint": "fp_43dfabdef1",
"finish_reason": "stop",
"logprobs": None,
},
id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
)
```
??? info "Stream"
Stream a response from the model:
```python
for chunk in model.stream(messages):
print(chunk.text, end="")
```
Results in a sequence of `AIMessageChunk` objects with partial content:
```python
AIMessageChunk(content="", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(content="J", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(content=" la", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(
content=" programmation", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
)
AIMessageChunk(content=".", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(
content="",
response_metadata={"finish_reason": "stop"},
id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0",
)
```
To collect the full message, you can concatenate the chunks:
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
```
```python
full = AIMessageChunk(
content="J'adore la programmation.",
response_metadata={"finish_reason": "stop"},
id="run-bf917526-7f58-4683-84f7-36a6b671d140",
)
```
??? info "Async"
Asynchronous equivalents of `invoke`, `stream`, and `batch` are also available:
```python
# Invoke
await model.ainvoke(messages)
# Stream
async for chunk in (await model.astream(messages))
# Batch
await model.abatch([messages])
```
Results in an `AIMessage` response:
```python
AIMessage(
content="J'adore la programmation.",
response_metadata={
"token_usage": {
"completion_tokens": 5,
"prompt_tokens": 31,
"total_tokens": 36,
},
"model_name": "gpt-4o",
"system_fingerprint": "fp_43dfabdef1",
"finish_reason": "stop",
"logprobs": None,
},
id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
usage_metadata={
"input_tokens": 31,
"output_tokens": 5,
"total_tokens": 36,
},
)
```
For batched calls, results in a `list[AIMessage]`.
??? info "Tool calling"
```python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(
..., description="The city and state, e.g. San Francisco, CA"
)
model_with_tools = model.bind_tools(
[GetWeather, GetPopulation]
# strict = True # Enforce tool args schema is respected
)
ai_msg = model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
ai_msg.tool_calls
```
```python
[
{
"name": "GetWeather",
"args": {"location": "Los Angeles, CA"},
"id": "call_6XswGD5Pqk8Tt5atYr7tfenU",
},
{
"name": "GetWeather",
"args": {"location": "New York, NY"},
"id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi",
},
{
"name": "GetPopulation",
"args": {"location": "Los Angeles, CA"},
"id": "call_49CFW8zqC9W7mh7hbMLSIrXw",
},
{
"name": "GetPopulation",
"args": {"location": "New York, NY"},
"id": "call_6ghfKxV264jEfe1mRIkS3PE7",
},
]
```
!!! note "Parallel tool calls"
[`openai >= 1.32`](https://pypi.org/project/openai/) supports a
`parallel_tool_calls` parameter that defaults to `True`. This parameter can
be set to `False` to disable parallel tool calls:
```python
ai_msg = model_with_tools.invoke(
"What is the weather in LA and NY?", parallel_tool_calls=False
)
ai_msg.tool_calls
```
```python
[
{
"name": "GetWeather",
"args": {"location": "Los Angeles, CA"},
"id": "call_4OoY0ZR99iEvC7fevsH8Uhtz",
}
]
```
Like other runtime parameters, `parallel_tool_calls` can be bound to a model
using `model.bind(parallel_tool_calls=False)` or during instantiation by
setting `model_kwargs`.
See `bind_tools` for more.
??? info "Built-in (server-side) tools"
You can access [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses)
supported by the OpenAI Responses API. See [LangChain docs](https://docs.langchain.com/oss/python/integrations/chat/openai#responses-api)
for more detail.
```python
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="...", output_version="responses/v1")
tool = {"type": "web_search"}
model_with_tools = model.bind_tools([tool])
response = model_with_tools.invoke("What was a positive news story from today?")
response.content
```
```python
[
{
"type": "text",
"text": "Today, a heartwarming story emerged from ...",
"annotations": [
{
"end_index": 778,
"start_index": 682,
"title": "Title of story",
"type": "url_citation",
"url": "<url of story>",
}
],
}
]
```
!!! version-added "Added in `langchain-openai` 0.3.9"
!!! version-added "Added in `langchain-openai` 0.3.26: Updated `AIMessage` format"
[`langchain-openai >= 0.3.26`](https://pypi.org/project/langchain-openai/#history)
allows users to opt-in to an updated `AIMessage` format when using the
Responses API. Setting `ChatOpenAI(..., output_version="responses/v1")` will
format output from reasoning summaries, built-in tool invocations, and other
response items into the message's `content` field, rather than
`additional_kwargs`. We recommend this format for new applications.
??? info "Managing conversation state"
OpenAI's Responses API supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
Passing in response IDs from previous messages will continue a conversational
thread.
```python
from langchain_openai import ChatOpenAI
model = ChatOpenAI(
model="...",
use_responses_api=True,
output_version="responses/v1",
)
response = model.invoke("Hi, I'm Bob.")
response.text
```
```txt
"Hi Bob! How can I assist you today?"
```
```python
second_response = model.invoke(
"What is my name?",
previous_response_id=response.response_metadata["id"],
)
second_response.text
```
```txt
"Your name is Bob. How can I help you today, Bob?"
```
!!! version-added "Added in `langchain-openai` 0.3.9"
!!! version-added "Added in `langchain-openai` 0.3.26"
You can also initialize `ChatOpenAI` with `use_previous_response_id`.
Input messages up to the most recent response will then be dropped from request
payloads, and `previous_response_id` will be set using the ID of the most
recent response.
```python
model = ChatOpenAI(model="...", use_previous_response_id=True)
```
??? info "Reasoning output"
OpenAI's Responses API supports [reasoning models](https://platform.openai.com/docs/guides/reasoning?api-mode=responses)
that expose a summary of internal reasoning processes.
```python
from langchain_openai import ChatOpenAI
reasoning = {
"effort": "medium", # 'low', 'medium', or 'high'
"summary": "auto", # 'detailed', 'auto', or None
}
model = ChatOpenAI(
model="...", reasoning=reasoning, output_version="responses/v1"
)
response = model.invoke("What is 3^3?")
# Response text
print(f"Output: {response.text}")
# Reasoning summaries
for block in response.content:
if block["type"] == "reasoning":
for summary in block["summary"]:
print(summary["text"])
```
```txt
Output: 3³ = 27
Reasoning: The user wants to know...
```
!!! version-added "Added in `langchain-openai` 0.3.26: Updated `AIMessage` format"
[`langchain-openai >= 0.3.26`](https://pypi.org/project/langchain-openai/#history)
allows users to opt-in to an updated `AIMessage` format when using the
Responses API. Setting `ChatOpenAI(..., output_version="responses/v1")` will
format output from reasoning summaries, built-in tool invocations, and other
response items into the message's `content` field, rather than
`additional_kwargs`. We recommend this format for new applications.
??? info "Structured output"
```python
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(
description="How funny the joke is, from 1 to 10"
)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
```python
Joke(
setup="Why was the cat sitting on the computer?",
punchline="To keep an eye on the mouse!",
rating=None,
)
```
See `with_structured_output` for more info.
??? info "JSON mode"
```python
json_model = model.bind(response_format={"type": "json_object"})
ai_msg = json_model.invoke(
"Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
)
ai_msg.content
```
```txt
'\\n{\\n "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\\n}'
```
??? info "Image input"
```python
import base64
import httpx
from langchain.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
]
)
ai_msg = model.invoke([message])
ai_msg.content
```
```txt
"The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions."
```
??? info "Token usage"
```python
ai_msg = model.invoke(messages)
ai_msg.usage_metadata
```

```txt
{"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}
```
When streaming, set the `stream_usage` kwarg:
```python
stream = model.stream(messages, stream_usage=True)
full = next(stream)
for chunk in stream:
full += chunk
full.usage_metadata
```
```txt
{"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}
```
??? info "Logprobs"
```python
logprobs_model = model.bind(logprobs=True)
ai_msg = logprobs_model.invoke(messages)
ai_msg.response_metadata["logprobs"]
```
```txt
{
"content": [
{
"token": "J",
"bytes": [74],
"logprob": -4.9617593e-06,
"top_logprobs": [],
},
{
"token": "'adore",
"bytes": [39, 97, 100, 111, 114, 101],
"logprob": -0.25202933,
"top_logprobs": [],
},
{
"token": " la",
"bytes": [32, 108, 97],
"logprob": -0.20141791,
"top_logprobs": [],
},
{
"token": " programmation",
"bytes": [
32,
112,
114,
111,
103,
114,
97,
109,
109,
97,
116,
105,
111,
110,
],
"logprob": -1.9361265e-07,
"top_logprobs": [],
},
{
"token": ".",
"bytes": [46],
"logprob": -1.2233183e-05,
"top_logprobs": [],
},
]
}
```
??? info "Response metadata"
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
```txt
{
"token_usage": {
"completion_tokens": 5,
"prompt_tokens": 28,
"total_tokens": 33,
},
"model_name": "gpt-4o",
"system_fingerprint": "fp_319be4768e",
"finish_reason": "stop",
"logprobs": None,
}
```
??? info "Flex processing"
OpenAI offers a variety of [service tiers](https://platform.openai.com/docs/guides/flex-processing?api-mode=responses).
The "flex" tier offers cheaper pricing for requests, with the trade-off that
responses may take longer and resources might not always be available.
This approach is best suited for non-critical tasks, including model testing,
data enhancement, or jobs that can be run asynchronously.
To use it, initialize the model with `service_tier="flex"`:
```python
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="...", service_tier="flex")
```
Note that this is a beta feature that is only available for a subset of models.
See OpenAI [flex processing docs](https://platform.openai.com/docs/guides/flex-processing?api-mode=responses)
for more detail.
??? info "OpenAI-compatible APIs"
`ChatOpenAI` can be used with OpenAI-compatible APIs like
[LM Studio](https://lmstudio.ai/), [vLLM](https://github.com/vllm-project/vllm),
[Ollama](https://ollama.com/), and others.
To use custom parameters specific to these providers, use the `extra_body` parameter.
!!! example "LM Studio example with TTL (auto-eviction)"
```python
from langchain_openai import ChatOpenAI
model = ChatOpenAI(
base_url="http://localhost:1234/v1",
api_key="lm-studio", # Can be any string
model="mlx-community/QwQ-32B-4bit",
temperature=0,
extra_body={
"ttl": 300
}, # Auto-evict model after 5 minutes of inactivity
)
```
!!! example "vLLM example with custom parameters"
```python
model = ChatOpenAI(
base_url="http://localhost:8000/v1",
api_key="EMPTY",
model="meta-llama/Llama-2-7b-chat-hf",
extra_body={"use_beam_search": True, "best_of": 4},
)
```
??? info "`model_kwargs` vs `extra_body`"
Use the correct parameter for different types of API arguments:
**Use `model_kwargs` for:**
- Standard OpenAI API parameters not explicitly defined as class parameters
- Parameters that should be flattened into the top-level request payload
- Examples: `max_completion_tokens`, `stream_options`, `modalities`, `audio`
```python
# Standard OpenAI parameters
model = ChatOpenAI(
model="...",
model_kwargs={
"stream_options": {"include_usage": True},
"max_completion_tokens": 300,
"modalities": ["text", "audio"],
"audio": {"voice": "alloy", "format": "wav"},
},
)
```
**Use `extra_body` for:**
- Custom parameters specific to OpenAI-compatible providers (vLLM, LM Studio,
OpenRouter, etc.)
- Parameters that need to be nested under `extra_body` in the request
- Any non-standard OpenAI API parameters
```python
# Custom provider parameters
model = ChatOpenAI(
base_url="http://localhost:8000/v1",
model="custom-model",
extra_body={
"use_beam_search": True, # vLLM parameter
"best_of": 4, # vLLM parameter
"ttl": 300, # LM Studio parameter
},
)
```
**Key Differences:**
- `model_kwargs`: Parameters are **merged into top-level** request payload
- `extra_body`: Parameters are **nested under `extra_body`** key in request
!!! warning
Always use `extra_body` for custom parameters, **not** `model_kwargs`.
Using `model_kwargs` for non-OpenAI parameters will cause API errors.
??? info "Prompt caching optimization"
For high-volume applications with repetitive prompts, use `prompt_cache_key`
per-invocation to improve cache hit rates and reduce costs:
```python
model = ChatOpenAI(model="...")
response = model.invoke(
messages,
prompt_cache_key="example-key-a", # Routes to same machine for cache hits
)
customer_response = model.invoke(messages, prompt_cache_key="example-key-b")
support_response = model.invoke(messages, prompt_cache_key="example-key-c")
# Dynamic cache keys based on context
cache_key = f"example-key-{dynamic_suffix}"
response = model.invoke(messages, prompt_cache_key=cache_key)
```
Cache keys help ensure requests with the same prompt prefix are routed to
machines with existing cache, providing cost reduction and latency improvement on
cached tokens.
""" # noqa: E501
max_tokens: int | None = Field(default=None, alias="max_completion_tokens")
"""Maximum number of tokens to generate."""
@property
def lc_secrets(self) -> dict[str, str]:
    """Map secret constructor fields to the environment variables that supply them."""
    secret_env_map: dict[str, str] = {"openai_api_key": "OPENAI_API_KEY"}
    return secret_env_map
@classmethod
def get_lc_namespace(cls) -> list[str]:
    """Return the LangChain serialization namespace of this object.

    Returns:
        `["langchain", "chat_models", "openai"]`
    """
    namespace_parts = ("langchain", "chat_models", "openai")
    return list(namespace_parts)
@property
def lc_attributes(self) -> dict[str, Any]:
    """Collect non-secret constructor attributes for serialization.

    Only truthy values are included, so unset/empty fields are omitted.
    """
    candidate_fields = (
        ("openai_organization", self.openai_organization),
        ("openai_api_base", self.openai_api_base),
        ("openai_proxy", self.openai_proxy),
    )
    return {name: value for name, value in candidate_fields if value}
@classmethod
def is_lc_serializable(cls) -> bool:
    """Report that LangChain can serialize instances of this model class."""
    serializable = True
    return serializable
@property
def _default_params(self) -> dict[str, Any]:
    """Default request parameters with ``max_tokens`` renamed for this API.

    The Chat Completions API deprecated ``max_tokens`` in favor of
    ``max_completion_tokens``; rewrite the key here so downstream requests
    use the supported name.
    """
    params = super()._default_params
    try:
        params["max_completion_tokens"] = params.pop("max_tokens")
    except KeyError:
        # No max_tokens configured; nothing to rename.
        pass
    return params
def _get_request_payload(
    self,
    input_: LanguageModelInput,
    *,
    stop: list[str] | None = None,
    **kwargs: Any,
) -> dict:
    """Build the request payload, applying OpenAI compatibility fixups.

    - ``max_tokens`` was deprecated in favor of ``max_completion_tokens``
      in the September 2024 release, so the key is renamed when present.
    - o-series models (names matching ``o<digit>``) expect the ``developer``
      role in place of ``system``, so system messages are re-labeled.
    """
    payload = super()._get_request_payload(input_, stop=stop, **kwargs)
    try:
        payload["max_completion_tokens"] = payload.pop("max_tokens")
    except KeyError:
        pass
    is_o_series = bool(self.model_name) and re.match(r"^o\d", self.model_name)
    if is_o_series:
        for msg in payload.get("messages", []):
            if msg["role"] == "system":
                msg["role"] = "developer"
    return payload
def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
    """Dispatch streaming to the Responses API when configured, else Chat Completions."""
    merged_params = {**kwargs, **self.model_kwargs}
    if self._use_responses_api(merged_params):
        return super()._stream_responses(*args, **kwargs)
    return super()._stream(*args, **kwargs)
async def _astream(
    self, *args: Any, **kwargs: Any
) -> AsyncIterator[ChatGenerationChunk]:
    """Async counterpart of `_stream`: route to Responses or Chat Completions API."""
    if self._use_responses_api({**kwargs, **self.model_kwargs}):
        stream = super()._astream_responses(*args, **kwargs)
    else:
        stream = super()._astream(*args, **kwargs)
    async for chunk in stream:
        yield chunk
def with_structured_output(
    self,
    schema: _DictOrPydanticClass | None = None,
    *,
    method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
    include_raw: bool = False,
    strict: bool | None = None,
    **kwargs: Any,
) -> Runnable[LanguageModelInput, _DictOrPydantic]:
    """Wrap the model so outputs are formatted to match ``schema``.

    Args:
        schema: The output schema, given as an OpenAI function/tool schema, a
            JSON Schema, a `TypedDict` class, or a Pydantic class. With a
            Pydantic class the output is a validated instance of that class;
            otherwise the output is a plain dict and is not validated.
        method: Generation-steering strategy:

            - ``'json_schema'`` (default): OpenAI's Structured Output API.
            - ``'function_calling'``: OpenAI's tool-calling API.
            - ``'json_mode'``: OpenAI's JSON mode; the prompt itself must
              describe the desired schema.
        include_raw: When ``False`` (default), only the parsed output is
            returned and parsing errors are raised. When ``True``, the result
            is a dict with keys ``'raw'`` (the `BaseMessage`), ``'parsed'``
            (``None`` on parse failure), and ``'parsing_error'`` (the caught
            exception, if any).
        strict: When ``True``, model output is guaranteed to match the schema
            and the input schema is validated; when ``False``, neither is
            validated; when ``None``, the argument is not sent to the model.
            Only meaningful for ``'json_schema'`` and ``'function_calling'``.
        kwargs: Additional keyword args passed through to the model, e.g.
            ``tools`` to bind tool-like objects alongside structured output
            (requires ``method='json_schema'``, ``strict=True``, and
            ``include_raw=True``).

    Returns:
        A `Runnable` accepting the same inputs as the chat model. Its output
        is the parsed object (Pydantic instance or dict), or — with
        ``include_raw=True`` — the ``raw``/``parsed``/``parsing_error`` dict
        described above.
    """
    # Pure delegation: schema conversion and parser wiring live in the base
    # implementation; this override documents the OpenAI-specific contract.
    return super().with_structured_output(
        schema,
        method=method,
        include_raw=include_raw,
        strict=strict,
        **kwargs,
    )
def _is_pydantic_class(obj: Any) -> bool:
    """Return True when `obj` is a class object deriving from a Pydantic BaseModel."""
    if not isinstance(obj, type):
        return False
    return is_basemodel_subclass(obj)
def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"], ensure_ascii=False),
},
}
def _lc_invalid_tool_call_to_openai_tool_call(
invalid_tool_call: InvalidToolCall,
) -> dict:
return {
"type": "function",
"id": invalid_tool_call["id"],
"function": {
"name": invalid_tool_call["name"],
"arguments": invalid_tool_call["args"],
},
}
def _url_to_size(image_source: str) -> tuple[int, int] | None:
    """Best-effort (width, height) of an image given a URL or base64 data URI.

    Returns None when Pillow (or httpx, for remote URLs) is not installed, or
    when the source is neither a recognizable URL nor a data URI.
    """
    try:
        from PIL import Image  # type: ignore[import]
    except ImportError:
        logger.info(
            "Unable to count image tokens. To count image tokens please install "
            "`pip install -U pillow httpx`."
        )
        return None
    if _is_url(image_source):
        try:
            import httpx
        except ImportError:
            logger.info(
                "Unable to count image tokens. To count image tokens please install "
                "`pip install -U httpx`."
            )
            return None
        response = httpx.get(image_source)
        response.raise_for_status()
        # PIL's Image.size is already a (width, height) tuple.
        return Image.open(BytesIO(response.content)).size
    if _is_b64(image_source):
        _, b64_payload = image_source.split(",", 1)
        raw_bytes = base64.b64decode(b64_payload)
        return Image.open(BytesIO(raw_bytes)).size
    return None
def _count_image_tokens(width: int, height: int) -> int:
    """Estimate OpenAI vision token cost for an image of the given pixel size.

    Reference: https://platform.openai.com/docs/guides/vision/calculating-costs
    Cost is 85 base tokens plus 170 per 512x512 tile after resizing.
    """
    resized_w, resized_h = _resize(width, height)
    tiles_high = ceil(resized_h / 512)
    tiles_wide = ceil(resized_w / 512)
    return 85 + 170 * tiles_high * tiles_wide
def _is_url(s: str) -> bool:
try:
result = urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug("Unable to parse URL: %s", e)
return False
def _is_b64(s: str) -> bool:
return s.startswith("data:image")
def _resize(width: int, height: int) -> tuple[int, int]:
# larger side must be <= 2048
if width > 2048 or height > 2048:
if width > height:
height = (height * 2048) // width
width = 2048
else:
width = (width * 2048) // height
height = 2048
# smaller side must be <= 768
if width > 768 and height > 768:
if width > height:
width = (width * 768) // height
height = 768
else:
height = (width * 768) // height
width = 768
return width, height
def _convert_to_openai_response_format(
    schema: dict[str, Any] | type, *, strict: bool | None = None
) -> dict | TypeBaseModel:
    """Normalize a user-supplied schema into an OpenAI ``response_format`` payload.

    Accepts a Pydantic model class (returned unchanged), a complete
    ``{"type": "json_schema", "json_schema": ...}`` dict (used as-is), a bare
    ``{"name": ..., "schema": ...}`` dict (wrapped), or anything that
    `convert_to_openai_function` understands (converted).

    Raises:
        ValueError: when ``strict`` is specified both inside the schema and as
            an argument with conflicting values.
    """
    if isinstance(schema, type) and is_basemodel_subclass(schema):
        # Pydantic classes are handled natively downstream; no conversion.
        return schema
    if (
        isinstance(schema, dict)
        and "json_schema" in schema
        and schema.get("type") == "json_schema"
    ):
        # Already in OpenAI response_format shape.
        response_format = schema
    elif isinstance(schema, dict) and "name" in schema and "schema" in schema:
        # Bare json_schema payload; wrap it in the response_format envelope.
        response_format = {"type": "json_schema", "json_schema": schema}
    else:
        if strict is None:
            # Fall back to a boolean 'strict' key embedded in the schema, else False.
            if isinstance(schema, dict) and isinstance(schema.get("strict"), bool):
                strict = schema["strict"]
            else:
                strict = False
        function = convert_to_openai_function(schema, strict=strict)
        # response_format uses 'schema' where function specs use 'parameters'.
        function["schema"] = function.pop("parameters")
        response_format = {"type": "json_schema", "json_schema": function}
    if (
        strict is not None
        and strict is not response_format["json_schema"].get("strict")
        and isinstance(schema, dict)
        and "strict" in schema.get("json_schema", {})
    ):
        msg = (
            f"Output schema already has 'strict' value set to "
            f"{schema['json_schema']['strict']} but 'strict' also passed in to "
            f"with_structured_output as {strict}. Please make sure that "
            f"'strict' is only specified in one place."
        )
        raise ValueError(msg)
    return response_format
def _oai_structured_outputs_parser(
    ai_msg: AIMessage, schema: type[_BM]
) -> PydanticBaseModel | None:
    """Extract the parsed structured output from an OpenAI message.

    Returns the parsed object (validated against `schema` when it arrives as a
    dict), or None when the model elected to call tools instead. Raises
    `OpenAIRefusalError` when the model refused to answer, and ValueError when
    the message carries neither a parsed result nor a refusal.
    """
    parsed = ai_msg.additional_kwargs.get("parsed")
    if parsed:
        return schema(**parsed) if isinstance(parsed, dict) else parsed
    refusal = ai_msg.additional_kwargs.get("refusal")
    if refusal:
        raise OpenAIRefusalError(refusal)
    # Refusals may also surface as non-standard content blocks.
    for block in ai_msg.content:
        if (
            isinstance(block, dict)
            and block.get("type") == "non_standard"
            and "refusal" in block["value"]
        ):
            raise OpenAIRefusalError(block["value"]["refusal"])
    if ai_msg.tool_calls:
        return None
    msg = (
        "Structured Output response does not have a 'parsed' field nor a 'refusal' "
        f"field. Received message:\n\n{ai_msg}"
    )
    raise ValueError(msg)
|
ChatOpenAI
|
python
|
getsentry__sentry
|
tests/integration/test_api.py
|
{
"start": 531,
"end": 5323
}
|
class ____(AuthProviderTestCase):
    """Verify that org-scoped API endpoints enforce valid, unexpired SSO sessions."""

    def setUp(self) -> None:
        self.organization = self.create_organization(name="foo")
        self.user = self.create_user("foobar@example.com", is_superuser=False)
        team = self.create_team(name="bar", organization=self.organization)
        self.project = self.create_project(
            name="baz", organization=self.organization, teams=[team]
        )
        member = self.create_member(
            user=self.user, organization=self.organization, teams=[team]
        )
        # Mark the membership as SSO-linked so SSO enforcement applies.
        member.flags["sso:linked"] = True
        member.save()
        event = self.store_event(data={}, project_id=self.project.id)
        group_id = event.group_id
        with assume_test_silo_mode(SiloMode.CONTROL):
            auth_provider = AuthProvider.objects.create(
                organization_id=self.organization.id, provider="dummy", flags=0
            )
            AuthIdentity.objects.create(auth_provider=auth_provider, user=self.user)
        self.login_as(self.user)
        # A representative sample of org-scoped endpoints; the last one goes
        # through the internal API, which once upon a time was broken.
        self.paths = (
            f"/api/0/organizations/{self.organization.slug}/",
            f"/api/0/projects/{self.organization.slug}/{self.project.slug}/",
            f"/api/0/teams/{self.organization.slug}/{self.team.slug}/",
            f"/api/0/issues/{group_id}/",
            f"/api/0/issues/{group_id}/events/latest/",
        )

    def _start_valid_sso_session(self) -> None:
        """Record a fresh, unexpired SSO session for the org."""
        sso = SsoSession.create(self.organization.id)
        self.session[sso.session_key] = sso.to_dict()
        self.save_session()

    def _start_expired_sso_session(self) -> None:
        """Record an SSO session that expired one hour past the expiry window."""
        started_at = datetime.now(tz=timezone.utc) - SSO_EXPIRY_TIME - timedelta(hours=1)
        sso = SsoSession(self.organization.id, started_at)
        self.session[sso.session_key] = sso.to_dict()
        self.save_session()

    def test_sso_auth_required(self) -> None:
        # we should be redirecting the user to the authentication form as they
        # haven't verified this specific organization
        self._test_paths_with_status(401)

    def test_sso_superuser_required(self) -> None:
        with assume_test_silo_mode(SiloMode.CONTROL):
            # superuser should still require SSO as they're a member of the org
            self.user.update(is_superuser=True)
        self._test_paths_with_status(401)

    def test_sso_with_expiry_valid(self) -> None:
        self._start_valid_sso_session()
        self._test_paths_with_status(200)

    def test_sso_with_expiry_expired(self) -> None:
        self._start_expired_sso_session()
        self._test_paths_with_status(401)

    def test_sso_redirect_url_internal(self) -> None:
        self._start_expired_sso_session()
        resp = self.client.get(
            f"/api/0/teams/{self.organization.slug}/{self.team.slug}/",
            HTTP_REFERER=f"/organizations/{self.organization.slug}/teams",
        )
        assert (
            resp.json()["detail"]["extra"]["loginUrl"]
            == "/auth/login/foo/?next=%2Forganizations%2Ffoo%2Fteams"
        )

    def test_sso_redirect_url_internal_with_domain(self) -> None:
        self._start_expired_sso_session()
        resp = self.client.get(
            f"/api/0/teams/{self.organization.slug}/{self.team.slug}/",
            HTTP_REFERER=f"https://testdomain.com/organizations/{self.organization.slug}/teams",
            HTTP_HOST="testdomain.com",
        )
        assert (
            resp.json()["detail"]["extra"]["loginUrl"]
            == "/auth/login/foo/?next=https%3A%2F%2Ftestdomain.com%2Forganizations%2Ffoo%2Fteams"
        )

    def test_sso_redirect_url_external_removed(self) -> None:
        self._start_expired_sso_session()
        resp = self.client.get(
            f"/api/0/teams/{self.organization.slug}/{self.team.slug}/",
            HTTP_REFERER="http://example.com",
        )
        assert resp.json()["detail"]["extra"]["loginUrl"] == "/auth/login/foo/"

    def _test_paths_with_status(self, status):
        """Assert every sampled endpoint returns the expected status code."""
        for path in self.paths:
            resp = self.client.get(path)
            assert resp.status_code == status, (resp.status_code, resp.content)
|
AuthenticationTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/composition.py
|
{
"start": 2377,
"end": 2493
}
|
class ____:
"""Marker for holding places in fan-in lists where input mappings will feed."""
|
MappedInputPlaceholder
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/backfills.py
|
{
"start": 2009,
"end": 2141
}
|
class ____(BaseModel):
"""Backfill serializer for responses in dry-run mode."""
logical_date: datetime
|
DryRunBackfillResponse
|
python
|
wandb__wandb
|
wandb/automations/_filters/operators.py
|
{
"start": 6120,
"end": 6421
}
|
class ____(BaseOp):
val: TupleOf[Scalar] = Field(default=(), alias="$nin")
@override
def __invert__(self) -> In:
"""Implements `~NotIn(a) -> In(a)`."""
return In(val=self.val)
# Element operator(s)
# https://www.mongodb.com/docs/manual/reference/operator/query/exists/
|
NotIn
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/coercions.py
|
{
"start": 27779,
"end": 28119
}
|
class ____(ByOfImpl, RoleImpl):
__slots__ = ()
def _post_coercion(self, resolved, **kw):
if (
isinstance(resolved, self._role_class)
and resolved._order_by_label_element is not None
):
return elements._label_reference(resolved)
else:
return resolved
|
OrderByImpl
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 4828,
"end": 4924
}
|
class ____(generic.DeleteView):
model = Author
success_url = "/list/authors/"
|
AuthorDelete
|
python
|
astropy__astropy
|
astropy/modeling/tests/test_bounding_box.py
|
{
"start": 66313,
"end": 71522
}
|
class ____:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, "x") == (0, True)
assert _SelectorArgument.validate(model, "y") == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, "x", ignore) == (0, ignore)
assert _SelectorArgument.validate(model, "y", ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
_SelectorArgument.validate(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer."
):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert (
_SelectorArgument(index, mk.MagicMock()).get_selector(*inputs)
== inputs[index]
)
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(
*inputs
) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert (
_SelectorArgument(index, mk.MagicMock()).name(model)
== model.inputs[index]
)
def test_pretty_repr(self):
model = Gaussian2D()
assert (
_SelectorArgument(0, False).pretty_repr(model)
== "Argument(name='x', ignore=False)"
)
assert (
_SelectorArgument(0, True).pretty_repr(model)
== "Argument(name='x', ignore=True)"
)
assert (
_SelectorArgument(1, False).pretty_repr(model)
== "Argument(name='y', ignore=False)"
)
assert (
_SelectorArgument(1, True).pretty_repr(model)
== "Argument(name='y', ignore=True)"
)
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, "y": 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
MESSAGE = r".* was not found in .*"
with pytest.raises(RuntimeError, match=MESSAGE) as err:
_SelectorArgument(1, True).get_fixed_value(model, {0: 5})
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, "x") is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, "y") is False
# Fail
with pytest.raises(ValueError, match=r"'.*' is not one of the inputs: .*"):
argument.is_argument(model, "z")
with pytest.raises(
ValueError, match=r"Key value: .* must be string or integer"
):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(
IndexError, match=r"Integer key: .* must be non-negative and < .*"
):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == (
model.inputs[index],
ignore,
)
|
Test_SelectorArgument
|
python
|
numba__numba
|
numba/tests/test_builtins.py
|
{
"start": 34449,
"end": 35626
}
|
class ____(TestCase):
def test_eq_ne(self):
for opstr in ('eq', 'ne'):
op = getattr(operator, opstr)
@njit
def func(a, b):
return op(a, b)
# all these things should evaluate to being equal or not, all should
# survive typing.
things = (1, 0, True, False, 1.0, 2.0, 1.1, 1j, None, "", "1")
for x, y in itertools.product(things, things):
self.assertPreciseEqual(func.py_func(x, y), func(x, y))
def test_cmp(self):
for opstr in ('gt', 'lt', 'ge', 'le', 'eq', 'ne'):
op = getattr(operator, opstr)
@njit
def func(a, b):
return op(a, b)
# numerical things should all be comparable
things = (1, 0, True, False, 1.0, 0.0, 1.1)
for x, y in itertools.product(things, things):
expected = func.py_func(x, y)
got = func(x, y)
message = ("%s %s %s does not match between Python and Numba"
% (x, opstr, y))
self.assertEqual(expected, got, message)
|
TestOperatorMixedTypes
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/mpich2/package.py
|
{
"start": 217,
"end": 1067
}
|
class ____(Package):
homepage = "http://www.mpich.org"
url = "http://www.mpich.org/static/downloads/1.5/mpich2-1.5.tar.gz"
list_url = "http://www.mpich.org/static/downloads/"
list_depth = 2
tags = ["tag1", "tag3"]
version("1.5", md5="9c5d5d4fe1e17dd12153f40bc5b6dbc0")
version("1.4", md5="0123456789abcdef0123456789abcdef")
version("1.3", md5="0123456789abcdef0123456789abcdef")
version("1.2", md5="0123456789abcdef0123456789abcdef")
version("1.1", md5="0123456789abcdef0123456789abcdef")
version("1.0", md5="0123456789abcdef0123456789abcdef")
provides("mpi@:2.0")
provides("mpi@:2.1", when="@1.1:")
provides("mpi@:2.2", when="@1.2:")
depends_on("c", type="build")
def install(self, spec, prefix):
configure("--prefix=%s" % prefix)
make()
make("install")
|
Mpich2
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_captured_logs.py
|
{
"start": 1176,
"end": 7566
}
|
class ____(ExecutingGraphQLContextTestMatrix):
def test_get_captured_logs_over_graphql(self, graphql_context):
selector = infer_job_selector(graphql_context, "spew_job")
payload = sync_execute_get_run_log_data(
context=graphql_context,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
run_id = payload["run"]["runId"]
logs = graphql_context.instance.all_logs(run_id, of_type=DagsterEventType.LOGS_CAPTURED)
assert len(logs) == 1
entry = logs[0]
log_key = [run_id, "compute_logs", entry.dagster_event.logs_captured_data.file_key]
result = execute_dagster_graphql(
graphql_context,
CAPTURED_LOGS_QUERY,
variables={"logKey": log_key},
)
stdout = result.data["capturedLogs"]["stdout"]
assert stdout == "HELLO WORLD\n"
def test_captured_logs_subscription_graphql(self, graphql_context):
selector = infer_job_selector(graphql_context, "spew_job")
payload = sync_execute_get_run_log_data(
context=graphql_context,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
run_id = payload["run"]["runId"]
logs = graphql_context.instance.all_logs(run_id, of_type=DagsterEventType.LOGS_CAPTURED)
assert len(logs) == 1
entry = logs[0]
log_key = [run_id, "compute_logs", entry.dagster_event.logs_captured_data.file_key]
results = execute_dagster_graphql_subscription(
graphql_context,
CAPTURED_LOGS_SUBSCRIPTION,
variables={"logKey": log_key},
)
assert len(results) == 1
stdout = results[0].data["capturedLogs"]["stdout"]
assert stdout == "HELLO WORLD\n"
def test_captured_logs_event_graphql(self, graphql_context):
selector = infer_job_selector(graphql_context, "spew_job")
payload = sync_execute_get_run_log_data(
context=graphql_context,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
run_id = payload["run"]["runId"]
result = execute_dagster_graphql(
graphql_context,
CAPTURED_LOGS_EVENT_QUERY,
variables={"runId": run_id},
)
assert result.data["runOrError"]["__typename"] == "Run"
events = result.data["runOrError"]["eventConnection"]["events"]
assert len(events) > 0
def test_captured_logs_with_invalid_utf8(self, graphql_context):
"""Test that captured logs handle invalid UTF-8 sequences gracefully.
This test verifies the fix for issue #32251 where invalid UTF-8 bytes
in stderr would cause GraphQL query failures.
"""
import os
# Create a unique log key for this test
log_key = ["test_invalid_utf8", "compute_logs", "test_step"]
# Write binary data with invalid UTF-8 to stderr
compute_log_manager = graphql_context.instance.compute_log_manager
stderr_path = compute_log_manager.get_captured_local_path(log_key, "err")
# Ensure directory exists
os.makedirs(os.path.dirname(stderr_path), exist_ok=True)
# Write test data with invalid UTF-8 sequences
with open(stderr_path, "wb") as f:
f.write(b"Valid text before\n")
f.write(b"\xff\xfe") # Invalid UTF-8 sequence
f.write(b"\nValid text after\n")
try:
# Query should not raise an exception
result = execute_dagster_graphql(
graphql_context,
CAPTURED_LOGS_QUERY,
variables={"logKey": log_key},
)
# Verify we got a result (not an error)
assert result.data is not None
assert result.data["capturedLogs"] is not None
# Stderr should contain the replacement character (�)
stderr = result.data["capturedLogs"]["stderr"]
assert stderr is not None
assert "Valid text before" in stderr
assert "Valid text after" in stderr
# The invalid bytes should be replaced with replacement character
assert "\ufffd" in stderr or "�" in stderr
finally:
# Cleanup test file
if os.path.exists(stderr_path):
os.remove(stderr_path)
def test_captured_logs_subscription_with_invalid_utf8(self, graphql_context):
"""Test that captured logs subscription handles invalid UTF-8 gracefully.
This test verifies the fix works for GraphQL subscriptions as well as queries.
"""
import os
# Create a unique log key for this test
log_key = ["test_invalid_utf8_sub", "compute_logs", "test_step"]
# Write binary data with invalid UTF-8 to stderr
compute_log_manager = graphql_context.instance.compute_log_manager
stderr_path = compute_log_manager.get_captured_local_path(log_key, "err")
# Ensure directory exists
os.makedirs(os.path.dirname(stderr_path), exist_ok=True)
# Write test data simulating partial multi-byte UTF-8 at chunk boundary
with open(stderr_path, "wb") as f:
# Write valid text
f.write(b"Subscription test\n")
# Add invalid UTF-8 (partial 4-byte sequence)
f.write(b"\xf0\x9f") # First 2 bytes of 4-byte emoji, incomplete
f.write(b"\nMore text\n")
try:
# Subscription should not raise an exception
results = execute_dagster_graphql_subscription(
graphql_context,
CAPTURED_LOGS_SUBSCRIPTION,
variables={"logKey": log_key},
)
# Verify we got results
assert len(results) > 0
# First result should contain data
assert results[0].data is not None
assert results[0].data["capturedLogs"] is not None
# Stderr should be readable
stderr = results[0].data["capturedLogs"]["stderr"]
assert stderr is not None
assert "Subscription test" in stderr
assert "More text" in stderr
finally:
# Cleanup test file
if os.path.exists(stderr_path):
os.remove(stderr_path)
|
TestCapturedLogs
|
python
|
tornadoweb__tornado
|
tornado/httputil.py
|
{
"start": 13859,
"end": 23218
}
|
class ____:
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`.HTTPHeaders` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
.. versionchanged:: 3.1
The list format of ``X-Forwarded-For`` is now supported.
.. attribute:: protocol
The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
maps arguments names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`.RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: query_arguments
Same format as ``arguments``, but contains only arguments extracted
from the query string.
.. versionadded:: 3.2
.. attribute:: body_arguments
Same format as ``arguments``, but contains only arguments extracted
from the request body.
.. versionadded:: 3.2
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of `.HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
.. versionchanged:: 4.0
Moved from ``tornado.httpserver.HTTPRequest``.
.. deprecated:: 6.5.2
The ``host`` argument to the ``HTTPServerRequest`` constructor is deprecated. Use
``headers["Host"]`` instead. This argument was mistakenly removed in Tornado 6.5.0 and
temporarily restored in 6.5.2.
"""
path = None # type: str
query = None # type: str
# HACK: Used for stream_request_body
_body_future = None # type: Future[None]
def __init__(
self,
method: Optional[str] = None,
uri: Optional[str] = None,
version: str = "HTTP/1.0",
headers: Optional[HTTPHeaders] = None,
body: Optional[bytes] = None,
host: Optional[str] = None,
files: Optional[Dict[str, List["HTTPFile"]]] = None,
connection: Optional["HTTPConnection"] = None,
start_line: Optional["RequestStartLine"] = None,
server_connection: Optional[object] = None,
) -> None:
if start_line is not None:
method, uri, version = start_line
assert method
self.method = method
assert uri
self.uri = uri
self.version = version
self.headers = headers or HTTPHeaders()
self.body = body or b""
# set remote IP and protocol
context = getattr(connection, "context", None)
self.remote_ip = getattr(context, "remote_ip", None)
self.protocol = getattr(context, "protocol", "http")
try:
self.host = host or self.headers["Host"]
except KeyError:
if version == "HTTP/1.0":
# HTTP/1.0 does not require the Host header.
self.host = "127.0.0.1"
else:
raise HTTPInputError("Missing Host header")
if not _ABNF.host.fullmatch(self.host):
raise HTTPInputError("Invalid Host header: %r" % self.host)
if "," in self.host:
# https://www.rfc-editor.org/rfc/rfc9112.html#name-request-target
# Server MUST respond with 400 Bad Request if multiple
# Host headers are present.
#
# We test for the presence of a comma instead of the number of
# headers received because a proxy may have converted
# multiple headers into a single comma-separated value
# (per RFC 9110 section 5.3).
#
# This is technically a departure from the RFC since the ABNF
# does not forbid commas in the host header. However, since
# commas are not allowed in DNS names, it is appropriate to
# disallow them. (The same argument could be made for other special
# characters, but commas are the most problematic since they could
# be used to exploit differences between proxies when multiple headers
# are supplied).
raise HTTPInputError("Multiple host headers not allowed: %r" % self.host)
self.host_name = split_host_and_port(self.host.lower())[0]
self.files = files or {}
self.connection = connection
self.server_connection = server_connection
self._start_time = time.time()
self._finish_time = None
if uri is not None:
self.path, sep, self.query = uri.partition("?")
self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
self.query_arguments = copy.deepcopy(self.arguments)
self.body_arguments = {} # type: Dict[str, List[bytes]]
@property
def cookies(self) -> Dict[str, http.cookies.Morsel]:
"""A dictionary of ``http.cookies.Morsel`` objects."""
if not hasattr(self, "_cookies"):
self._cookies = (
http.cookies.SimpleCookie()
) # type: http.cookies.SimpleCookie
if "Cookie" in self.headers:
try:
parsed = parse_cookie(self.headers["Cookie"])
except Exception:
pass
else:
for k, v in parsed.items():
try:
self._cookies[k] = v
except Exception:
# SimpleCookie imposes some restrictions on keys;
# parse_cookie does not. Discard any cookies
# with disallowed keys.
pass
return self._cookies
def full_url(self) -> str:
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri # type: ignore[operator]
def request_time(self) -> float:
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(
self, binary_form: bool = False
) -> Union[None, Dict, bytes]:
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer's
`ssl.SSLContext.verify_mode` field must be set, e.g.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain("foo.crt", "foo.key")
ssl_ctx.load_verify_locations("cacerts.pem")
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
server = HTTPServer(app, ssl_options=ssl_ctx)
By default, the return value is a dictionary (or None, if no
client certificate is present). If ``binary_form`` is true, a
DER-encoded form of the certificate is returned instead. See
SSLSocket.getpeercert() in the standard library for more
details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
if self.connection is None:
return None
# TODO: add a method to HTTPConnection for this so it can work with HTTP/2
return self.connection.stream.socket.getpeercert( # type: ignore
binary_form=binary_form
)
except SSLError:
return None
def _parse_body(self) -> None:
parse_body_arguments(
self.headers.get("Content-Type", ""),
self.body,
self.body_arguments,
self.files,
self.headers,
)
for k, v in self.body_arguments.items():
self.arguments.setdefault(k, []).extend(v)
def __repr__(self) -> str:
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join([f"{n}={getattr(self, n)!r}" for n in attrs])
return f"{self.__class__.__name__}({args})"
|
HTTPServerRequest
|
python
|
pytorch__pytorch
|
test/mobile/lightweight_dispatch/tests_setup.py
|
{
"start": 827,
"end": 1129
}
|
class ____(torch.nn.Module):
def forward(self, x: int):
a = torch.ones(
size=[3, x],
dtype=torch.int64,
layout=torch.strided,
device="cpu",
pin_memory=False,
)
return a
@save_model
|
ModelWithDTypeDeviceLayoutPinMemory
|
python
|
sqlalchemy__sqlalchemy
|
test/perf/orm2010.py
|
{
"start": 883,
"end": 5645
}
|
class ____(Employee):
__tablename__ = "grunt"
id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
savings = Column(Numeric)
employer_id = Column(Integer, ForeignKey("boss.id"))
employer = relationship(
"Boss", backref="employees", primaryjoin=Boss.id == employer_id
)
__mapper_args__ = {"polymorphic_identity": "grunt"}
if os.path.exists("orm2010.db"):
os.remove("orm2010.db")
# use a file based database so that cursor.execute() has some
# palpable overhead.
engine = create_engine("sqlite:///orm2010.db")
Base.metadata.create_all(engine)
sess = Session(engine)
def runit_persist(status, factor=1, query_runs=5):
num_bosses = 100 * factor
num_grunts = num_bosses * 100
bosses = [
Boss(name="Boss %d" % i, golf_average=Decimal(random.randint(40, 150)))
for i in range(num_bosses)
]
sess.add_all(bosses)
status("Added %d boss objects" % num_bosses)
grunts = [
Grunt(
name="Grunt %d" % i,
savings=Decimal(random.randint(5000000, 15000000) / 100),
)
for i in range(num_grunts)
]
status("Added %d grunt objects" % num_grunts)
while grunts:
# this doesn't associate grunts with bosses evenly,
# just associates lots of them with a relatively small
# handful of bosses
batch_size = 100
batch_num = (num_grunts - len(grunts)) / batch_size
boss = sess.query(Boss).filter_by(name="Boss %d" % batch_num).first()
for grunt in grunts[0:batch_size]:
grunt.employer = boss
grunts = grunts[batch_size:]
sess.commit()
status("Associated grunts w/ bosses and committed")
def runit_query_runs(status, factor=1, query_runs=5):
# do some heavier reading
for i in range(query_runs):
status("Heavy query run #%d" % (i + 1))
report = []
# load all the Grunts, print a report with their name, stats,
# and their bosses' stats.
for grunt in sess.query(Grunt):
report.append(
(
grunt.name,
grunt.savings,
grunt.employer.name,
grunt.employer.golf_average,
)
)
sess.close() # close out the session
def run_with_profile(runsnake=False, dump=False):
import cProfile
import pstats
filename = "orm2010.profile"
if os.path.exists("orm2010.profile"):
os.remove("orm2010.profile")
def status(msg):
print(msg)
cProfile.runctx(
# "runit_persist(status)",
"runit_persist(status); runit_query_runs(status)",
globals(),
locals(),
filename,
)
stats = pstats.Stats(filename)
counts_by_methname = {key[2]: stats.stats[key][0] for key in stats.stats}
print("SQLA Version: %s" % __version__)
print("Total calls %d" % stats.total_calls)
print("Total cpu seconds: %.2f" % stats.total_tt)
print(
"Total execute calls: %d"
% counts_by_methname["<method 'execute' of 'sqlite3.Cursor' objects>"]
)
print(
"Total executemany calls: %d"
% counts_by_methname.get(
"<method 'executemany' of 'sqlite3.Cursor' objects>", 0
)
)
if dump:
# stats.sort_stats("nfl")
stats.sort_stats("cumtime", "calls")
stats.print_stats()
# stats.print_callers()
if runsnake:
os.system("runsnake %s" % filename)
def run_with_time(factor):
import time
now = time.time()
def status(msg):
print("%d - %s" % (time.time() - now, msg))
runit_persist(status, factor)
print("Total time: %d" % (time.time() - now))
runit_query_runs(status, factor)
print("Total time: %d" % (time.time() - now))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--profile",
action="store_true",
help="run shorter test suite w/ cprofilng",
)
parser.add_argument(
"--dump",
action="store_true",
help="dump full call profile (implies --profile)",
)
parser.add_argument(
"--runsnake",
action="store_true",
help="invoke runsnakerun (implies --profile)",
)
parser.add_argument(
"--factor",
type=int,
default=10,
help="scale factor, a multiple of how many records to work with. "
"defaults to 10",
)
args = parser.parse_args()
args.profile = args.profile or args.dump or args.runsnake
if args.profile:
run_with_profile(runsnake=args.runsnake, dump=args.dump)
else:
run_with_time(args.factor)
|
Grunt
|
python
|
apache__airflow
|
providers/grpc/src/airflow/providers/grpc/operators/grpc.py
|
{
"start": 1254,
"end": 4010
}
|
class ____(BaseOperator):
"""
Calls a gRPC endpoint to execute an action.
:param stub_class: The stub client to use for this gRPC call
:param call_func: The client function name to call the gRPC endpoint
:param grpc_conn_id: The connection to run the operator against
:param data: The data to pass to the rpc call
:param interceptors: A list of gRPC interceptor objects to be used on the channel
:param custom_connection_func: The customized connection function to return channel object.
A callable that accepts the connection as its only arg.
:param streaming: A flag to indicate if the call is a streaming call
:param response_callback: The callback function to process the response from gRPC call,
takes in response object and context object, context object can be used to perform
push xcom or other after task actions
:param log_response: A flag to indicate if we need to log the response
"""
template_fields: Sequence[str] = ("stub_class", "call_func", "data")
template_fields_renderers = {"data": "py"}
def __init__(
self,
*,
stub_class: Callable,
call_func: str,
grpc_conn_id: str = "grpc_default",
data: dict | None = None,
interceptors: list[Callable] | None = None,
custom_connection_func: Callable | None = None,
streaming: bool = False,
response_callback: Callable | None = None,
log_response: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.stub_class = stub_class
self.call_func = call_func
self.grpc_conn_id = grpc_conn_id
self.data = data or {}
self.interceptors = interceptors
self.custom_connection_func = custom_connection_func
self.streaming = streaming
self.log_response = log_response
self.response_callback = response_callback
def _get_grpc_hook(self) -> GrpcHook:
return GrpcHook(
self.grpc_conn_id,
interceptors=self.interceptors,
custom_connection_func=self.custom_connection_func,
)
def execute(self, context: Context) -> None:
hook = self._get_grpc_hook()
self.log.info("Calling gRPC service")
# grpc hook always yield
responses = hook.run(self.stub_class, self.call_func, streaming=self.streaming, data=self.data)
for response in responses:
self._handle_response(response, context)
def _handle_response(self, response: Any, context: Context) -> None:
if self.log_response:
self.log.info("%r", response)
if self.response_callback:
self.response_callback(response, context)
|
GrpcOperator
|
python
|
huggingface__transformers
|
src/transformers/models/llava_next_video/modular_llava_next_video.py
|
{
"start": 25913,
"end": 34787
}
|
class ____(LlavaNextForConditionalGeneration):
def get_video_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
):
return self.model.get_video_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, LlavaNextVideoCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> import av
>>> from transformers import AutoProcessor, LlavaNextVideoForConditionalGeneration
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", device_map="auto")
>>> processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
>>> prompt = "USER: <video>\nWhy is this video funny? ASSISTANT:"
>>> video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
>>> container = av.open(video_path)
>>> # sample uniformly 8 frames from the video (model was trained with 32 frames per video, but this video is short)
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 8).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> inputs_video = processor(text=prompt, videos=clip, return_tensors="pt").to(model.device)
>>> # load an image to generate from an image
>>> prompt = "USER:<image>\nWhat is shown in this image? ASSISTANT:"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs_image = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
>>> # Generate from video
>>> generate_ids = model.generate(**inputs_video, max_length=50)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER:\nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and endearing sight of a baby wearing glasses and (...)"
>>> # Generate from image
>>> generate_ids = model.generate(**inputs_image, max_length=30)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER: \nWhat's the content of the image? ASSISTANT: The image shows a red stop sign on a pole, with a traditional Chinese archway (...)"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
image_sizes=image_sizes,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return LlavaNextVideoCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
video_hidden_states=outputs.video_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
pixel_values_videos=None,
image_sizes=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- extra custom processing
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
if cache_position[0] == 0:
model_inputs["pixel_values"] = pixel_values
model_inputs["pixel_values_videos"] = pixel_values_videos
model_inputs["image_sizes"] = image_sizes
return model_inputs
__all__ = [
"LlavaNextVideoConfig",
"LlavaNextVideoForConditionalGeneration",
"LlavaNextVideoModel",
"LlavaNextVideoPreTrainedModel",
]
|
LlavaNextVideoForConditionalGeneration
|
python
|
django__django
|
tests/staticfiles_tests/test_management.py
|
{
"start": 17262,
"end": 17349
}
|
class ____(TestCollectionDryRun):
pass
|
TestCollectionDryRunManifestStaticFilesStorage
|
python
|
zostera__django-bootstrap4
|
src/bootstrap4/templatetags/bootstrap4.py
|
{
"start": 17227,
"end": 23533
}
|
class ____(template.Node):
def __init__(self, nodelist, args, kwargs, asvar, **kwargs2):
self.nodelist = nodelist
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
output_kwargs = {}
for key in self.kwargs:
output_kwargs[key] = handle_var(self.kwargs[key], context)
buttons = []
submit = output_kwargs.get("submit", None)
reset = output_kwargs.get("reset", None)
if submit:
buttons.append(bootstrap_button(submit, "submit"))
if reset:
buttons.append(bootstrap_button(reset, "reset"))
buttons = " ".join(buttons) + self.nodelist.render(context)
output_kwargs.update({"label": None, "field": buttons})
css_class = output_kwargs.pop("form_group_class", "form-group")
output = render_form_group(render_field_and_label(**output_kwargs), css_class=css_class)
if self.asvar:
context[self.asvar] = output
return ""
else:
return output
@register.simple_tag(takes_context=True)
def bootstrap_messages(context, *args, **kwargs):
"""
Show django.contrib.messages Messages in Bootstrap alert containers.
In order to make the alerts dismissible (with the close button),
we have to set the jquery parameter too when using the
bootstrap_javascript tag.
Uses the template ``bootstrap4/messages.html``.
**Tag name**::
bootstrap_messages
**Parameters**::
None.
**Usage**::
{% bootstrap_messages %}
**Example**::
{% bootstrap_javascript jquery=True %}
{% bootstrap_messages %}
"""
# Force Context to dict
if isinstance(context, Context):
context = context.flatten()
context.update({"message_constants": message_constants})
return render_template_file("bootstrap4/messages.html", context=context)
@register.inclusion_tag("bootstrap4/pagination.html")
def bootstrap_pagination(page, **kwargs):
"""
Render pagination for a page.
**Tag name**::
bootstrap_pagination
**Parameters**::
page
The page of results to show.
pages_to_show
Number of pages in total
:default: ``11``
url
URL to navigate to for pagination forward and pagination back.
:default: ``None``
size
Controls the size of the pagination through CSS. Defaults to being normal sized.
One of the following:
* ``'small'``
* ``'large'``
:default: ``None``
extra
Any extra page parameters.
:default: ``None``
parameter_name
Name of the paging URL parameter.
:default: ``'page'``
**Usage**::
{% bootstrap_pagination page %}
**Example**::
{% bootstrap_pagination lines url="/pagination?page=1" size="large" %}
**Tip**::
If you want to repeat the query string arguments in subsequent pagination links,
use the "extra" parameter with "request.GET.urlencode":
{% bootstrap_pagination page_obj extra=request.GET.urlencode %}
"""
pagination_kwargs = kwargs.copy()
pagination_kwargs["page"] = page
return get_pagination_context(**pagination_kwargs)
@register.simple_tag
def bootstrap_url_replace_param(url, name, value):
return url_replace_param(url, name, value)
def get_pagination_context(
page, pages_to_show=11, url=None, size=None, justify_content=None, extra=None, parameter_name="page"
):
"""Generate Bootstrap pagination context from a page object."""
pages_to_show = int(pages_to_show)
if pages_to_show < 1:
raise ValueError(f"Pagination pages_to_show should be a positive integer, you specified {pages_to_show}.")
num_pages = page.paginator.num_pages
current_page = page.number
half_page_num = int(floor(pages_to_show / 2))
if half_page_num < 0:
half_page_num = 0
first_page = current_page - half_page_num
if first_page <= 1:
first_page = 1
if first_page > 1:
pages_back = first_page - half_page_num
if pages_back < 1:
pages_back = 1
else:
pages_back = None
last_page = first_page + pages_to_show - 1
if pages_back is None:
last_page += 1
if last_page > num_pages:
last_page = num_pages
if last_page < num_pages:
pages_forward = last_page + half_page_num
if pages_forward > num_pages:
pages_forward = num_pages
else:
pages_forward = None
if first_page > 1:
first_page -= 1
if pages_back is not None and pages_back > 1:
pages_back -= 1
else:
pages_back = None
pages_shown = []
for i in range(first_page, last_page + 1):
pages_shown.append(i)
# parse the url
parts = urlparse(url or "")
params = parse_qs(parts.query)
# append extra querystring parameters to the url.
if extra:
params.update(parse_qs(extra))
# build url again.
url = urlunparse(
[parts.scheme, parts.netloc, parts.path, parts.params, urlencode(params, doseq=True), parts.fragment]
)
# Set CSS classes, see http://getbootstrap.com/components/#pagination
pagination_css_classes = ["pagination"]
if size == "small":
pagination_css_classes.append("pagination-sm")
elif size == "large":
pagination_css_classes.append("pagination-lg")
if justify_content == "start":
pagination_css_classes.append("justify-content-start")
elif justify_content == "center":
pagination_css_classes.append("justify-content-center")
elif justify_content == "end":
pagination_css_classes.append("justify-content-end")
return {
"bootstrap_pagination_url": url,
"num_pages": num_pages,
"current_page": current_page,
"first_page": first_page,
"last_page": last_page,
"pages_shown": pages_shown,
"pages_back": pages_back,
"pages_forward": pages_forward,
"pagination_css_classes": " ".join(pagination_css_classes),
"parameter_name": parameter_name,
}
|
ButtonsNode
|
python
|
cython__cython
|
Cython/Compiler/Nodes.py
|
{
"start": 58272,
"end": 58835
}
|
class ____(CBaseTypeNode):
# components [CBaseTypeNode]
child_attrs = ["components"]
def analyse(self, env, could_be_name=False):
component_types = []
for c in self.components:
type = c.analyse(env)
if type.is_pyobject:
error(c.pos, "Tuple types can't (yet) contain Python objects.")
return error_type
component_types.append(type)
entry = env.declare_tuple_type(self.pos, component_types)
entry.used = True
return entry.type
|
CTupleBaseTypeNode
|
python
|
realpython__materials
|
python-textual/vertical_layout_tcss.py
|
{
"start": 120,
"end": 413
}
|
class ____(App):
CSS_PATH = "vertical_layout.tcss"
def compose(self):
with Vertical():
for i in range(NUM_BOXES):
yield Static(f"Static {i + 1}")
if __name__ == "__main__":
app = VerticalLayoutAppWithTCSS()
app.run()
|
VerticalLayoutAppWithTCSS
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builder_test/packages/gnuconfig/package.py
|
{
"start": 216,
"end": 525
}
|
class ____(Package):
"""This package is needed to allow mocking AutotoolsPackage objects"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("2.0", md5="abcdef0123456789abcdef0123456789")
version("1.0", md5="0123456789abcdef0123456789abcdef")
|
Gnuconfig
|
python
|
run-llama__llama_index
|
llama-index-experimental/llama_index/experimental/param_tuner/base.py
|
{
"start": 1529,
"end": 2171
}
|
class ____(BaseModel):
"""Base param tuner."""
param_dict: Dict[str, Any] = Field(
..., description="A dictionary of parameters to iterate over."
)
fixed_param_dict: Dict[str, Any] = Field(
default_factory=dict,
description="A dictionary of fixed parameters passed to each job.",
)
show_progress: bool = False
@abstractmethod
def tune(self) -> TunedResult:
"""Tune parameters."""
async def atune(self) -> TunedResult:
"""
Async Tune parameters.
Override if you implement a native async method.
"""
return self.tune()
|
BaseParamTuner
|
python
|
huggingface__transformers
|
src/transformers/models/clip/modeling_clip.py
|
{
"start": 41744,
"end": 43815
}
|
class ____(CLIPPreTrainedModel):
main_input_name = "pixel_values"
input_modalities = ("image",)
def __init__(self, config: CLIPConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
vision_model = CLIPVisionModel._from_config(config.vision_config)
self.vision_model = vision_model.vision_model
# Classifier head
self.classifier = (
nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values,
**kwargs,
)
sequence_output = outputs.last_hidden_state
sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
return ImageClassifierOutput(
loss=loss,
logits=logits,
)
__all__ = [
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
"CLIPForImageClassification",
]
|
CLIPForImageClassification
|
python
|
plotly__plotly.py
|
plotly/graph_objs/waterfall/totals/marker/_line.py
|
{
"start": 233,
"end": 3176
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall.totals.marker"
_path_str = "waterfall.totals.marker.line"
_valid_props = {"color", "width"}
@property
def color(self):
"""
Sets the line color of all intermediate sums and total values.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def width(self):
"""
Sets the line width of all intermediate sums and total values.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the line color of all intermediate sums and total
values.
width
Sets the line width of all intermediate sums and total
values.
"""
def __init__(self, arg=None, color=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.totals.marker.Line`
color
Sets the line color of all intermediate sums and total
values.
width
Sets the line width of all intermediate sums and total
values.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.totals.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.totals.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Line
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/lookup_ops.py
|
{
"start": 29783,
"end": 31938
}
|
class ____(TextFileInitializer):
"""Table initializer for `int64` IDs to string tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.LINE_NUMBER,
value_column_index=TextFileIndex.WHOLE_LINE,
vocab_size=None,
delimiter="\t",
name="text_file_string_table_init"):
"""Constructs an initializer for an id-to-string table from a text file.
It populates a table that its key and value types are int64 and string,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by `key_column_index`
and `value_column_index`.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string or int64.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization. The
path must be accessible from wherever the graph is initialized (eg.
trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the keys
from. The default is to use the line number, starting from zero.
value_column_index: The column index from the text file to get the values
from. The default is to use the whole line content.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileStringTableInitializer, self).__init__(
filename,
dtypes.int64,
key_column_index,
dtypes.string,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
|
TextFileStringTableInitializer
|
python
|
has2k1__plotnine
|
plotnine/themes/themeable.py
|
{
"start": 64836,
"end": 65305
}
|
class ____(themeable):
def apply_figure(self, figure: Figure, targets: ThemeTargets):
warn(
"You no longer need to use subplots_adjust to make space for "
"the legend or text around the panels. This parameter will be "
"removed in a future version. You can still use 'plot_margin' "
"'panel_spacing' for your other spacing needs.",
FutureWarning,
)
@deprecated_themeable_name
|
subplots_adjust
|
python
|
doocs__leetcode
|
solution/1800-1899/1820.Maximum Number of Accepted Invitations/Solution.py
|
{
"start": 0,
"end": 548
}
|
class ____:
def maximumInvitations(self, grid: List[List[int]]) -> int:
def find(i):
for j, v in enumerate(grid[i]):
if v and j not in vis:
vis.add(j)
if match[j] == -1 or find(match[j]):
match[j] = i
return True
return False
m, n = len(grid), len(grid[0])
match = [-1] * n
ans = 0
for i in range(m):
vis = set()
ans += find(i)
return ans
|
Solution
|
python
|
pytorch__pytorch
|
torch/_dynamo/device_interface.py
|
{
"start": 14439,
"end": 17758
}
|
class ____(DeviceInterface):
device = torch.xpu.device # type: ignore[assignment]
Event = torch.xpu.Event # type: ignore[assignment]
Stream = torch.xpu.Stream # type: ignore[assignment]
# pyrefly: ignore [bad-override]
class Worker:
@staticmethod
def set_device(device: int) -> None:
caching_worker_current_devices["xpu"] = device
@staticmethod
def current_device() -> int:
if "xpu" in caching_worker_current_devices:
return caching_worker_current_devices["xpu"]
return torch.xpu.current_device()
@staticmethod
def get_device_properties(device: torch.types.Device = None) -> Any:
if device is not None:
if isinstance(device, str):
device = torch.device(device)
assert device.type == "xpu"
if isinstance(device, torch.device):
device = device.index
if device is None:
device = XpuInterface.Worker.current_device()
if "xpu" not in caching_worker_device_properties:
device_prop = [
torch.xpu.get_device_properties(i)
for i in range(torch.xpu.device_count())
]
caching_worker_device_properties["xpu"] = device_prop
return caching_worker_device_properties["xpu"][device]
current_device = staticmethod(torch.xpu.current_device)
set_device = staticmethod(torch.xpu.set_device)
device_count = staticmethod(torch.xpu.device_count) # type: ignore[has-type]
stream = staticmethod(torch.xpu.stream) # type: ignore[assignment]
# pyrefly: ignore [bad-override]
current_stream = staticmethod(torch.xpu.current_stream)
set_stream = staticmethod(torch.xpu.set_stream) # type: ignore[assignment]
_set_stream_by_id = staticmethod(torch.xpu._set_stream_by_id) # type: ignore[assignment]
synchronize = staticmethod(torch.xpu.synchronize)
get_device_properties = staticmethod(torch.xpu.get_device_properties) # type: ignore[assignment]
get_raw_stream = staticmethod(get_xpu_stream) # type: ignore[assignment, arg-type]
exchange_device = staticmethod(torch.xpu._exchange_device) # type: ignore[arg-type, has-type]
maybe_exchange_device = staticmethod(torch.xpu._maybe_exchange_device) # type: ignore[arg-type, has-type]
memory_allocated = staticmethod(torch.xpu.memory_allocated)
# Can be mock patched by @patch decorator.
@staticmethod
def is_available() -> bool:
return torch.xpu.is_available()
@staticmethod
def get_compute_capability(device: torch.types.Device = None) -> Any:
cc = torch.xpu.get_device_capability(device)
return cc
@staticmethod
def is_bf16_supported(including_emulation: bool = False) -> bool:
return torch.xpu.is_bf16_supported()
@staticmethod
def is_triton_capable(device: torch.types.Device = None) -> bool:
return True
@staticmethod
def raise_if_triton_unavailable(device: torch.types.Device = None) -> None:
import triton.backends
if "intel" not in triton.backends.backends:
raise RuntimeError("triton not built with the 'intel' backend")
@dataclass
|
XpuInterface
|
python
|
django-extensions__django-extensions
|
django_extensions/management/commands/sqldsn.py
|
{
"start": 2679,
"end": 6608
}
|
class ____(BaseCommand):
help = "Prints DSN on stdout, as specified in settings.py"
requires_system_checks: List[str] = []
can_import_settings = True
def add_arguments(self, parser):
super().add_arguments(parser)
dbspec = parser.add_mutually_exclusive_group()
dbspec.add_argument(
"-R",
"--router",
action="store",
dest="router",
default=DEFAULT_DB_ALIAS,
help=(
"Use this router-database other then default "
"(deprecated: use --database instead)"
),
)
dbspec.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
help=(
"Nominates a database to run command for. "
'Defaults to the "%s" database.'
)
% DEFAULT_DB_ALIAS,
)
styles = sorted(
set([style for _, style, _ in _FORMATTERS if style is not None])
)
parser.add_argument(
"-s",
"--style",
action="store",
dest="style",
default=None,
choices=styles + ["all"],
help="DSN format style.",
)
dbspec.add_argument(
"-a",
"--all",
action="store_true",
dest="all",
default=False,
help="Show DSN for all database routes",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
dest="quiet",
default=False,
help="Quiet mode only show DSN",
)
def handle(self, *args, **options):
self.style = color_style()
all_databases = options["all"]
if all_databases:
databases = settings.DATABASES.keys()
else:
databases = [options["database"]]
if options["router"] != DEFAULT_DB_ALIAS:
warnings.warn(
"--router is deprecated. You should use --database.",
RemovedInNextVersionWarning,
stacklevel=2,
)
databases = [options["router"]]
for i, database in enumerate(databases):
if i != 0:
sys.stdout.write("\n")
self.show_dsn(database, options)
def show_dsn(self, database, options):
dbinfo = settings.DATABASES.get(database)
quiet = options["quiet"]
dsn_style = options["style"]
if dbinfo is None:
raise CommandError("Unknown database %s" % database)
engine = dbinfo.get("ENGINE")
dbuser = dbinfo.get("USER")
dbpass = dbinfo.get("PASSWORD")
dbname = dbinfo.get("NAME")
dbhost = dbinfo.get("HOST")
dbport = dbinfo.get("PORT")
if dbport == "":
dbport = None
dsn = [
formatter(dbhost, dbport, dbname, dbuser, dbpass)
for engines, style, formatter in _FORMATTERS
if engine in engines
and (dsn_style == style or dsn_style == "all" and style is not None)
]
if not dsn:
available = ", ".join(
style
for engines, style, _ in _FORMATTERS
if engine in engines and style is not None
)
dsn = [
self.style.ERROR(
f"Invalid style {dsn_style} for {engine} (available: {available})"
if available
else "Unknown database, can't generate DSN"
)
]
if not quiet:
sys.stdout.write(
self.style.SQL_TABLE(
f"DSN for database {database!r} with engine {engine!r}:\n"
)
)
for output in dsn:
sys.stdout.write(f"{output}\n")
|
Command
|
python
|
google__jax
|
jax/_src/test_util.py
|
{
"start": 42985,
"end": 54286
}
|
class ____(parameterized.TestCase):
"""Base class for JAX tests including numerical checks and boilerplate."""
_default_global_config: dict[str, Any] = {}
_default_thread_local_config = {
'jax_enable_checks': True,
'jax_numpy_dtype_promotion': 'strict',
'jax_numpy_rank_promotion': 'raise',
'jax_traceback_filtering': 'off',
'jax_legacy_prng_key': 'error',
}
def setUp(self):
super().setUp()
self.enterContext(assert_global_configs_unchanged())
# We use the adler32 hash for two reasons.
# a) it is deterministic run to run, unlike hash() which is randomized.
# b) it returns values in int32 range, which RandomState requires.
self._rng = npr.RandomState(zlib.adler32(self._testMethodName.encode()))
self.enterContext(global_config_context(**self._default_global_config))
for config_name, value in self._default_thread_local_config.items():
self.enterContext(config.config_states[config_name](value))
if TEST_WITH_PERSISTENT_COMPILATION_CACHE.value:
assert TEST_NUM_THREADS.value <= 1, "Persistent compilation cache is not thread-safe."
self.enterContext(config.enable_compilation_cache(True))
self.enterContext(config.raise_persistent_cache_errors(True))
self.enterContext(config.persistent_cache_min_compile_time_secs(0))
self.enterContext(config.persistent_cache_min_entry_size_bytes(0))
tmp_dir = self.enterContext(tempfile.TemporaryDirectory())
self.enterContext(config.compilation_cache_dir(tmp_dir))
self.addCleanup(compilation_cache.reset_cache)
def tearDown(self) -> None:
assert core.reset_trace_state()
super().tearDown()
def rng(self):
return self._rng
def assertDeprecationWarnsOrRaises(self, deprecation_id: str, message: str):
"""Assert warning or error, depending on deprecation state.
For use with functions that call :func:`jax._src.deprecations.warn`.
"""
if deprecations.is_accelerated(deprecation_id):
return self.assertRaisesRegex(ValueError, message)
else:
return self.assertWarnsRegex(DeprecationWarning, message)
def assertArraysEqual(self, actual, desired, *, check_dtypes=True, err_msg='',
allow_object_dtype=False, verbose=True):
"""Assert that x and y arrays are exactly equal."""
if check_dtypes:
self.assertDtypesMatch(actual, desired)
actual = np.asarray(actual)
desired = np.asarray(desired)
if (not allow_object_dtype) and (actual.dtype == object or desired.dtype == object):
# See https://github.com/jax-ml/jax/issues/17867
raise TypeError(
"assertArraysEqual may be poorly behaved when np.asarray casts to dtype=object. "
"If comparing PRNG keys, consider random_test.KeyArrayTest.assertKeysEqual. "
"If comparing collections of arrays, consider using assertAllClose. "
"To let this test proceed anyway, pass allow_object_dtype=True.")
# Work around https://github.com/numpy/numpy/issues/18992
with np.errstate(over='ignore'):
np.testing.assert_array_equal(actual, desired, err_msg=err_msg,
verbose=verbose)
def assertArraysAllClose(self, actual, desired, *, check_dtypes=True, atol=None,
rtol=None, err_msg=''):
"""Assert that actual and desired are close (up to numerical tolerances)."""
self.assertEqual(actual.shape, desired.shape)
atol = max(tolerance(_dtype(actual), atol), tolerance(_dtype(desired), atol))
rtol = max(tolerance(_dtype(actual), rtol), tolerance(_dtype(desired), rtol))
_assert_numpy_allclose(actual, desired, atol=atol, rtol=rtol, err_msg=err_msg)
if check_dtypes:
self.assertDtypesMatch(actual, desired)
def assertDtypesMatch(self, actual, desired, *, canonicalize_dtypes=True):
if not config.enable_x64.value and canonicalize_dtypes:
self.assertEqual(_dtypes.canonicalize_dtype(_dtype(actual), allow_extended_dtype=True),
_dtypes.canonicalize_dtype(_dtype(desired), allow_extended_dtype=True))
else:
self.assertEqual(_dtype(actual), _dtype(desired))
def assertAllClose(self, actual, desired, *, check_dtypes=True, atol=None, rtol=None,
canonicalize_dtypes=True, err_msg=''):
"""Assert that actual and desired, either arrays or nested tuples/lists, are close."""
if isinstance(actual, dict):
self.assertIsInstance(desired, dict)
self.assertEqual(set(actual.keys()), set(desired.keys()))
for k in actual.keys():
self.assertAllClose(actual[k], desired[k], check_dtypes=check_dtypes, atol=atol,
rtol=rtol, canonicalize_dtypes=canonicalize_dtypes,
err_msg=err_msg)
elif is_sequence(actual) and not hasattr(actual, '__array__'):
self.assertTrue(is_sequence(desired) and not hasattr(desired, '__array__'))
self.assertEqual(len(actual), len(desired))
for actual_elt, desired_elt in zip(actual, desired):
self.assertAllClose(actual_elt, desired_elt, check_dtypes=check_dtypes, atol=atol,
rtol=rtol, canonicalize_dtypes=canonicalize_dtypes,
err_msg=err_msg)
elif hasattr(actual, '__array__') or np.isscalar(actual):
self.assertTrue(hasattr(desired, '__array__') or np.isscalar(desired))
if check_dtypes:
self.assertDtypesMatch(actual, desired, canonicalize_dtypes=canonicalize_dtypes)
actual = np.asarray(actual)
desired = np.asarray(desired)
self.assertArraysAllClose(actual, desired, check_dtypes=False, atol=atol, rtol=rtol,
err_msg=err_msg)
elif actual == desired:
return
else:
raise TypeError((type(actual), type(desired)))
def assertMultiLineStrippedEqual(self, expected, what):
"""Asserts two strings are equal, after dedenting and stripping each line."""
expected = textwrap.dedent(expected)
what = textwrap.dedent(what)
ignore_space_re = re.compile(r'\s*\n\s*')
expected_clean = re.sub(ignore_space_re, '\n', expected.strip())
what_clean = re.sub(ignore_space_re, '\n', what.strip())
if what_clean != expected_clean:
# Print it so we can copy-and-paste it into the test
print(f"Found\n{what}\n")
self.assertMultiLineEqual(expected_clean, what_clean,
msg=f"Found\n{what}\nExpecting\n{expected}")
@contextmanager
def assertNoWarnings(self):
with test_warning_util.raise_on_warnings():
yield
# We replace assertWarns and assertWarnsRegex with functions that use the
# thread-safe warning utilities. Unlike the unittest versions these only
# function as context managers.
@contextmanager
def assertWarns(self, warning, *, msg=None):
with test_warning_util.record_warnings() as ws:
yield
for w in ws:
if not isinstance(w.message, warning):
continue
if msg is not None and msg not in str(w.message):
continue
return
self.fail(f"Expected warning not found {warning}:'{msg}', got "
f"{ws}")
@contextmanager
def assertWarnsRegex(self, warning, regex):
if regex is not None and not isinstance(regex, re.Pattern):
regex = re.compile(regex)
with test_warning_util.record_warnings() as ws:
yield
for w in ws:
if not isinstance(w.message, warning):
continue
if regex is not None and not regex.search(str(w.message)):
continue
return
self.fail(f"Expected warning not found {warning}:'{regex}', "
f"got warnings: {[str(w.message) for w in ws]}")
def _CompileAndCheck(self, fun, args_maker, *, check_dtypes=True, tol=None,
rtol=None, atol=None, check_cache_misses=True):
"""Helper method for running JAX compilation and allclose assertions."""
args = args_maker()
def wrapped_fun(*args):
self.assertTrue(python_should_be_executing)
return fun(*args)
python_should_be_executing = True
python_ans = fun(*args)
python_shapes = tree_map(lambda x: np.shape(x), python_ans)
np_shapes = tree_map(lambda x: np.shape(np.asarray(x)), python_ans)
self.assertEqual(python_shapes, np_shapes)
cache_misses = dispatch.xla_primitive_callable.cache_info().misses
python_ans = fun(*args)
if check_cache_misses and TEST_NUM_THREADS.value <= 1:
self.assertEqual(
cache_misses, dispatch.xla_primitive_callable.cache_info().misses,
"Compilation detected during second call of {} in op-by-op "
"mode.".format(fun))
cfun = api.jit(wrapped_fun)
python_should_be_executing = True
monitored_ans = cfun(*args)
python_should_be_executing = False
compiled_ans = cfun(*args)
self.assertAllClose(monitored_ans, python_ans, check_dtypes=check_dtypes,
atol=atol or tol, rtol=rtol or tol)
self.assertAllClose(compiled_ans, python_ans, check_dtypes=check_dtypes,
atol=atol or tol, rtol=rtol or tol)
args = args_maker()
python_should_be_executing = True
python_ans = fun(*args)
python_should_be_executing = False
compiled_ans = cfun(*args)
self.assertAllClose(compiled_ans, python_ans, check_dtypes=check_dtypes,
atol=atol or tol, rtol=rtol or tol)
def _CheckAgainstNumpy(self, numpy_reference_op, lax_op, args_maker,
check_dtypes=True, tol=None, atol=None, rtol=None,
canonicalize_dtypes=True):
args = args_maker()
lax_ans = lax_op(*args)
numpy_ans = numpy_reference_op(*args)
self.assertAllClose(lax_ans, numpy_ans, check_dtypes=check_dtypes,
atol=atol or tol, rtol=rtol or tol,
canonicalize_dtypes=canonicalize_dtypes)
def assertCacheMisses(self,
func: Callable[[], Any], *,
cpp: int | None = None,
aot_call: int | None = None,
tracing: int | None = None,
lowering: int | None = None,
compilation_after_persistent_cache_miss: int | None = None):
with (count_pjit_cpp_cache_miss() as cpp_count,
count_aot_jit_cpp_cache_miss() as aot_call_count,
count_jit_tracing_cache_miss() as tracing_count,
count_jit_and_pmap_lowerings() as lowering_count,
count_compilation_after_persistent_cache_miss() as compilation_count):
func()
if cpp is not None:
self.assertEqual(cpp, cpp_count())
if aot_call is not None:
self.assertEqual(aot_call, aot_call_count())
if tracing is not None:
self.assertEqual(tracing, tracing_count())
if lowering is not None:
self.assertEqual(lowering, lowering_count())
if compilation_after_persistent_cache_miss is not None:
self.assertEqual(compilation_after_persistent_cache_miss,
compilation_count())
_PJIT_IMPLEMENTATION = api.jit
_PJIT_IMPLEMENTATION._name = "jit"
_NOOP_JIT_IMPLEMENTATION = lambda x, *args, **kwargs: x
_NOOP_JIT_IMPLEMENTATION._name = "noop"
JIT_IMPLEMENTATION = (
_PJIT_IMPLEMENTATION,
_NOOP_JIT_IMPLEMENTATION,
)
|
JaxTestCase
|
python
|
getsentry__sentry
|
tests/sentry/integrations/web/test_organization_integration_setup.py
|
{
"start": 211,
"end": 768
}
|
class ____(PermissionTestCase):
def setUp(self) -> None:
super().setUp()
self.path = f"/organizations/{self.organization.slug}/integrations/example/setup/"
# this currently redirects the user
@pytest.mark.xfail
def test_manager_can_load(self) -> None:
self.assert_role_can_access(self.path, "manager")
# this currently redirects the user
@pytest.mark.xfail
def test_owner_can_load(self) -> None:
self.assert_owner_can_access(self.path)
@control_silo_test
|
OrganizationIntegrationSetupPermissionTest
|
python
|
getsentry__sentry
|
src/sentry/rules/conditions/event_attribute.py
|
{
"start": 13357,
"end": 13819
}
|
class ____(AttributeHandler):
minimum_path_length = 2
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
if path[1] in ("in_foreground"):
contexts = event.data.get("contexts", {})
response = contexts.get("app")
if response is None:
response = {}
return [response.get(path[1])]
return []
@attribute_registry.register("os")
|
AppAttributeHandler
|
python
|
pytorch__pytorch
|
torch/distributed/elastic/rendezvous/__init__.py
|
{
"start": 4230,
"end": 6269
}
|
class ____ implements the rendezvous mechanism described above. It is a backend-
agnostic type that expects a particular :py:class:`.RendezvousBackend` instance
to be specified during construction.
Torch distributed users can either implement their own backend type or use one
of the following implementations that come with PyTorch:
- :py:class:`.C10dRendezvousBackend`: Uses a C10d store (by default
``TCPStore``) as the rendezvous backend. The main advantage of using a C10d
store is that it requires no 3rd-party dependency (such as etcd) to establish
a rendezvous.
- :py:class:`.EtcdRendezvousBackend`: Supersedes the legacy
:py:class:`.EtcdRendezvousHandler` class. Passing an
:py:class:`.EtcdRendezvousBackend` instance to
:py:class:`.DynamicRendezvousHandler` is functionally equivalent to
instantiating an :py:class:`.EtcdRendezvousHandler`.
::
store = TCPStore("localhost")
backend = C10dRendezvousBackend(store, "my_run_id")
rdzv_handler = DynamicRendezvousHandler.from_backend(
run_id="my_run_id", store=store, backend=backend, min_nodes=2, max_nodes=4
)
"""
from .api import (
rendezvous_handler_registry,
RendezvousClosedError,
RendezvousConnectionError,
RendezvousError,
RendezvousGracefulExitError,
RendezvousHandler,
RendezvousHandlerCreator,
RendezvousHandlerRegistry,
RendezvousInfo,
RendezvousParameters,
RendezvousStateError,
RendezvousStoreInfo,
RendezvousTimeoutError,
)
from .registry import _register_default_handlers, _register_out_of_tree_handlers
_register_default_handlers()
_register_out_of_tree_handlers()
__all__ = [
"RendezvousClosedError",
"RendezvousConnectionError",
"RendezvousError",
"RendezvousGracefulExitError",
"RendezvousHandler",
"RendezvousHandlerCreator",
"RendezvousHandlerRegistry",
"RendezvousInfo",
"RendezvousParameters",
"RendezvousStateError",
"RendezvousStoreInfo",
"RendezvousTimeoutError",
"rendezvous_handler_registry",
]
|
that
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_ignore_error01.py
|
{
"start": 315,
"end": 813
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("ignore_error01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string("A1", "123")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
PyCQA__pylint
|
tests/functional/r/raising/raising_self.py
|
{
"start": 37,
"end": 213
}
|
class ____(Exception):
def __init__(self):
Exception.__init__(self)
def return_self(self):
return self
raise MultiException().return_self()
|
MultiException
|
python
|
huggingface__transformers
|
tests/tensor_parallel/test_tensor_parallel.py
|
{
"start": 19027,
"end": 19155
}
|
class ____(TestTensorParallelBase):
"""Test tensor parallel with 4 processes."""
nproc_per_node = 4
|
TestTensorParallel4Proc
|
python
|
django__django
|
django/db/models/fields/__init__.py
|
{
"start": 99184,
"end": 99419
}
|
class ____(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta):
def get_internal_type(self):
return "AutoField"
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
|
AutoField
|
python
|
google__jax
|
jax/_src/pallas/core.py
|
{
"start": 4209,
"end": 4801
}
|
class ____:
"""Specifies how a block should be buffered for a pipeline.
Attributes:
buffer_count: The number of buffers to use for multiple buffering.
use_lookahead: optional bool, indicates whether to use lookahead on the
buffer. Enabling lookahead allows the pipeline to begin fetching the next
changed block as soon as a slot is available, no matter how many
iterations ahead that block is.
"""
buffer_count: int
use_lookahead: bool = False
split_list = util.split_list
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
|
Buffered
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/dataviews.py
|
{
"start": 117659,
"end": 135009
}
|
class ____(Response):
"""
Response of dataviews.get_by_id endpoint.
:param dataview: Dataview information
:type dataview: Dataview
"""
_service = "dataviews"
_action = "get_by_id"
_version = "2.23"
_schema = {
"definitions": {
"augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dataview": {
"properties": {
"augmentation": {
"$ref": "#/definitions/augmentation",
"description": "Augmentation parameters. Only for training and testing tasks.",
},
"company": {"description": "Company id", "type": "string"},
"created": {
"description": "Dataview creation time (UTC) ",
"format": "date-time",
"type": "string",
},
"description": {
"description": "Dataview description",
"type": "string",
},
"filters": {
"description": "List of FilterRule ('OR' connection)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": "array",
},
"id": {"description": "Dataview ID", "type": "string"},
"iteration": {
"$ref": "#/definitions/iteration",
"description": "Iteration parameters. Not applicable for register (import) tasks.",
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": "object",
},
"mapping": {
"$ref": "#/definitions/mapping",
"description": "Mapping parameters",
},
"name": {"description": "Dataview name", "type": "string"},
"output_rois": {
"$ref": "#/definitions/output_rois_enum",
"default": "all_in_frame",
"description": (
"'all_in_frame' - all rois for a frame are returned\n 'only_filtered' - only"
" rois which led this frame to be selected\n 'frame_per_roi' - single roi"
" per frame. Frame can be returned multiple times with a different roi each time.\n "
" Note: this should be used for Training tasks only\n Note:"
" frame_per_roi implies that only filtered rois will be returned\n "
),
},
"project": {
"description": "Project ID of the project to which this task is assigned",
"type": "string",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"user": {"description": "Associated user id", "type": "string"},
"versions": {
"description": "List of dataview entries. All tasks must have at least one dataview.",
"items": {"$ref": "#/definitions/dataview_entry"},
"type": "array",
},
"status": {
"description": "dataview status",
"enum": ["draft", "published"],
"type": "string",
},
},
"required": ["id", "name"],
"type": "object",
},
"dataview_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": "string",
},
"merge_with": {
"description": "Version ID to merge with",
"type": "string",
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": "string",
},
},
"required": ["dataset", "version"],
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
},
"properties": {
"dataview": {
"description": "Dataview information",
"oneOf": [{"$ref": "#/definitions/dataview"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, dataview=None, **kwargs):
super(GetByIdResponse, self).__init__(**kwargs)
self.dataview = dataview
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
if isinstance(value, dict):
value = Dataview.from_dict(value)
else:
self.assert_isinstance(value, "dataview", Dataview)
self._property_dataview = value
|
GetByIdResponse
|
python
|
oauthlib__oauthlib
|
oauthlib/openid/connect/core/exceptions.py
|
{
"start": 824,
"end": 1223
}
|
class ____(OpenIDClientError):
"""
The Authorization Server requires End-User authentication.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface for End-User authentication.
"""
error = 'login_required'
status_code = 401
|
LoginRequired
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/losses.py
|
{
"start": 12765,
"end": 14743
}
|
class ____(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
`loss = abs(y_true - y_pred)`
Standalone usage:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> mae = tf.keras.losses.MeanAbsoluteError()
>>> mae(y_true, y_pred).numpy()
0.5
>>> # Calling with 'sample_weight'.
>>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
0.25
>>> # Using 'sum' reduction type.
>>> mae = tf.keras.losses.MeanAbsoluteError(
... reduction=tf.keras.losses.Reduction.SUM)
>>> mae(y_true, y_pred).numpy()
1.0
>>> # Using 'none' reduction type.
>>> mae = tf.keras.losses.MeanAbsoluteError(
... reduction=tf.keras.losses.Reduction.NONE)
>>> mae(y_true, y_pred).numpy()
array([0.5, 0.5], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_error'):
"""Initializes `MeanAbsoluteError` instance.
Args:
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance. Defaults to 'mean_absolute_error'.
"""
super().__init__(mean_absolute_error, name=name, reduction=reduction)
|
MeanAbsoluteError
|
python
|
vyperlang__vyper
|
vyper/ast/nodes.py
|
{
"start": 34637,
"end": 34756
}
|
class ____(Operator):
__slots__ = ()
_description = "bitwise xor"
_pretty = "^"
_op = operator.xor
|
BitXor
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/contrib/rest_framework/authentication.py
|
{
"start": 205,
"end": 1789
}
|
class ____(BaseAuthentication):
"""
OAuth 2 authentication backend using `django-oauth-toolkit`
"""
www_authenticate_realm = "api"
def _dict_to_string(self, my_dict):
"""
Return a string of comma-separated key-value pairs (e.g. k="v",k2="v2").
"""
return ",".join(['{k}="{v}"'.format(k=k, v=v) for k, v in my_dict.items()])
def authenticate(self, request):
"""
Returns two-tuple of (user, token) if authentication succeeds,
or None otherwise.
"""
if request is None:
return None
oauthlib_core = get_oauthlib_core()
try:
valid, r = oauthlib_core.verify_request(request, scopes=[])
except ValueError as error:
if str(error) == "Invalid hex encoding in query string.":
raise SuspiciousOperation(error)
raise
else:
if valid:
return r.user, r.access_token
request.oauth2_error = getattr(r, "oauth2_error", {})
return None
def authenticate_header(self, request):
"""
Bearer is the only finalized type currently
"""
www_authenticate_attributes = OrderedDict(
[
("realm", self.www_authenticate_realm),
]
)
oauth2_error = getattr(request, "oauth2_error", {})
www_authenticate_attributes.update(oauth2_error)
return "Bearer {attributes}".format(
attributes=self._dict_to_string(www_authenticate_attributes),
)
|
OAuth2Authentication
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_dsa.py
|
{
"start": 28724,
"end": 37358
}
|
class ____:
@pytest.mark.parametrize(
("fmt", "password"),
itertools.product(
[
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.PrivateFormat.PKCS8,
],
[
b"s",
b"longerpassword",
b"!*$&(@#$*&($T@%_somesymbols",
b"\x01" * 1000,
],
),
)
def test_private_bytes_encrypted_pem(self, backend, fmt, password):
skip_fips_traditional_openssl(backend, fmt)
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
lambda pemfile: pemfile.read().encode(),
)
key = serialization.load_pem_private_key(key_bytes, None, backend)
assert isinstance(key, dsa.DSAPrivateKey)
serialized = key.private_bytes(
serialization.Encoding.PEM,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_pem_private_key(
serialized, password, backend
)
assert isinstance(loaded_key, dsa.DSAPrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt"),
[
(serialization.Encoding.Raw, serialization.PrivateFormat.PKCS8),
(serialization.Encoding.DER, serialization.PrivateFormat.Raw),
(serialization.Encoding.Raw, serialization.PrivateFormat.Raw),
(serialization.Encoding.X962, serialization.PrivateFormat.PKCS8),
(
serialization.Encoding.SMIME,
serialization.PrivateFormat.TraditionalOpenSSL,
),
],
)
def test_private_bytes_rejects_invalid(self, encoding, fmt, backend):
key = DSA_KEY_1024.private_key(backend)
with pytest.raises((ValueError, TypeError)):
key.private_bytes(encoding, fmt, serialization.NoEncryption())
@pytest.mark.parametrize(
("fmt", "password"),
[
[serialization.PrivateFormat.PKCS8, b"s"],
[serialization.PrivateFormat.PKCS8, b"longerpassword"],
[serialization.PrivateFormat.PKCS8, b"!*$&(@#$*&($T@%_somesymbol"],
[serialization.PrivateFormat.PKCS8, b"\x01" * 1000],
],
)
def test_private_bytes_encrypted_der(self, backend, fmt, password):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
lambda pemfile: pemfile.read().encode(),
)
key = serialization.load_pem_private_key(key_bytes, None, backend)
assert isinstance(key, dsa.DSAPrivateKey)
serialized = key.private_bytes(
serialization.Encoding.DER,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_der_private_key(
serialized, password, backend
)
assert isinstance(loaded_key, dsa.DSAPrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt", "loader_func"),
[
[
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_der_private_key,
],
[
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_unencrypted(
self, backend, encoding, fmt, loader_func
):
key = DSA_KEY_1024.private_key(backend)
serialized = key.private_bytes(
encoding, fmt, serialization.NoEncryption()
)
loaded_key = loader_func(serialized, None, backend)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.skip_fips(
reason="Traditional OpenSSL key format is not supported in FIPS mode."
)
@pytest.mark.parametrize(
("key_path", "encoding", "loader_func"),
[
[
os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"dsa.1024.pem",
),
serialization.Encoding.PEM,
serialization.load_pem_private_key,
],
[
os.path.join(
"asymmetric", "DER_Serialization", "dsa.1024.der"
),
serialization.Encoding.DER,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_traditional_openssl_unencrypted(
self, backend, key_path, encoding, loader_func
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, None, backend)
serialized = key.private_bytes(
encoding,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
assert serialized == key_bytes
def test_private_bytes_traditional_der_encrypted_invalid(self, backend):
key = DSA_KEY_1024.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password"),
)
def test_private_bytes_invalid_encoding(self, backend):
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(TypeError):
key.private_bytes(
"notencoding", # type: ignore[arg-type]
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
)
def test_private_bytes_invalid_format(self, backend):
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
"invalidformat", # type: ignore[arg-type]
serialization.NoEncryption(),
)
def test_private_bytes_invalid_encryption_algorithm(self, backend):
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
"notanencalg", # type: ignore[arg-type]
)
def test_private_bytes_unsupported_encryption_type(self, backend):
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
DummyKeySerializationEncryption(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.dsa_supported(),
skip_message="Does not support DSA.",
)
|
TestDSASerialization
|
python
|
sympy__sympy
|
sympy/physics/mechanics/method.py
|
{
"start": 37,
"end": 660
}
|
class ____(ABC):
"""Abstract Base Class for all methods."""
@abstractmethod
def q(self):
pass
@abstractmethod
def u(self):
pass
@abstractmethod
def bodies(self):
pass
@abstractmethod
def loads(self):
pass
@abstractmethod
def mass_matrix(self):
pass
@abstractmethod
def forcing(self):
pass
@abstractmethod
def mass_matrix_full(self):
pass
@abstractmethod
def forcing_full(self):
pass
def _form_eoms(self):
raise NotImplementedError("Subclasses must implement this.")
|
_Methods
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_color.py
|
{
"start": 9064,
"end": 9202
}
|
class ____(scale_color_gradientn):
"""
Create a n color gradient
"""
_aesthetics = ["fill"]
@dataclass
|
scale_fill_gradientn
|
python
|
huggingface__transformers
|
src/transformers/models/ernie/modeling_ernie.py
|
{
"start": 22868,
"end": 24357
}
|
class ____(PreTrainedModel):
config_class = ErnieConfig
base_model_prefix = "ernie"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": ErnieLayer,
"attentions": ErnieSelfAttention,
"cross_attentions": ErnieCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, ErnieLMPredictionHead):
init.zeros_(module.bias)
@auto_docstring(
custom_intro="""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
)
|
ErniePreTrainedModel
|
python
|
getsentry__sentry
|
src/sentry/models/groupresolution.py
|
{
"start": 688,
"end": 7806
}
|
class ____(Model):
"""
Describes when a group was marked as resolved.
"""
__relocation_scope__ = RelocationScope.Excluded
class Type:
in_release = 0
in_next_release = 1
class Status:
pending = 0
resolved = 1
group = FlexibleForeignKey("sentry.Group", unique=True)
# the release in which its suggested this was resolved
# which allows us to indicate if it still happens in newer versions
release = FlexibleForeignKey("sentry.Release")
# This release field represents the latest release version associated with a group when the
# user chooses "resolve in next release", and is set for both semver and date ordered releases
current_release_version = models.CharField(max_length=DB_VERSION_LENGTH, null=True, blank=True)
# This release field represents the future release version associated with a group when the
# user chooses "resolve in future release"
future_release_version = models.CharField(max_length=DB_VERSION_LENGTH, null=True, blank=True)
type = BoundedPositiveIntegerField(
choices=((Type.in_next_release, "in_next_release"), (Type.in_release, "in_release")),
null=True,
)
actor_id = BoundedPositiveIntegerField(null=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
status = BoundedPositiveIntegerField(
default=Status.pending,
choices=((Status.pending, _("Pending")), (Status.resolved, _("Resolved"))),
)
class Meta:
db_table = "sentry_groupresolution"
app_label = "sentry"
indexes = [
models.Index(
fields=["type", "status", "future_release_version"],
name="groupres_future_release_idx",
),
]
__repr__ = sane_repr("group_id", "release_id")
@classmethod
def has_resolution(cls, group, release):
"""
Determine if a resolution exists for the given group and release.
This is used to suggest if a regression has occurred.
"""
def compare_release_dates_for_in_next_release(res_release, res_release_datetime, release):
"""
Helper function that compares release versions based on date for
`GroupResolution.Type.in_next_release`
"""
return res_release == release.id or res_release_datetime > release.date_added
try:
(
res_type,
res_release,
res_release_version,
res_release_datetime,
current_release_version,
) = (
cls.objects.filter(group=group)
.select_related("release")
.values_list(
"type",
"release__id",
"release__version",
"release__date_added",
"current_release_version",
)[0]
)
except IndexError:
return False
# if no release is present, we assume we've gone from "no release" to "some release"
# in application configuration, and thus this must be older
if not release:
return True
follows_semver = follows_semver_versioning_scheme(
project_id=group.project.id,
org_id=group.organization.id,
release_version=release.version,
)
# if current_release_version was set, then it means that initially Group was resolved in
# next release, which means a release will have a resolution if it is the same as
# `current_release_version` or was released before it according to either its semver version
# or its date. We make that decision based on whether the project follows semantic
# versioning or not
if current_release_version:
if follows_semver:
try:
# If current_release_version == release.version => 0
# If current_release_version < release.version => -1
# If current_release_version > release.version => 1
current_release_raw = parse_release(
current_release_version, json_loads=orjson.loads
).get("version_raw")
release_raw = parse_release(release.version, json_loads=orjson.loads).get(
"version_raw"
)
return compare_version_relay(current_release_raw, release_raw) >= 0
except RelayError:
...
else:
try:
current_release_obj = Release.objects.get(
organization_id=group.organization.id, version=current_release_version
)
return compare_release_dates_for_in_next_release(
res_release=current_release_obj.id,
res_release_datetime=current_release_obj.date_added,
release=release,
)
except Release.DoesNotExist:
...
# We still fallback to the older model if either current_release_version was not set (
# i.e. In all resolved cases except for Resolved in Next Release) or if for whatever
# reason the semver/date checks fail (which should not happen!)
if res_type in (None, cls.Type.in_next_release):
# Add metric here to ensure that this code branch ever runs given that
# clear_expired_resolutions changes the type to `in_release` once a Release instance
# is created
metrics.incr("groupresolution.has_resolution.in_next_release", sample_rate=1.0)
return compare_release_dates_for_in_next_release(
res_release=res_release, res_release_datetime=res_release_datetime, release=release
)
elif res_type == cls.Type.in_release:
# If release id provided is the same as resolved release id then return False
# regardless of whether it is a semver project or not
if res_release == release.id:
return False
if follows_semver:
try:
# A resolution only exists if the resolved release is greater (in semver
# terms) than the provided release
res_release_raw = parse_release(
res_release_version, json_loads=orjson.loads
).get("version_raw")
release_raw = parse_release(release.version, json_loads=orjson.loads).get(
"version_raw"
)
return compare_version_relay(res_release_raw, release_raw) == 1
except RelayError:
...
# Fallback to older model if semver comparison fails due to whatever reason
return res_release_datetime >= release.date_added
else:
raise NotImplementedError
|
GroupResolution
|
python
|
fastai__fastai
|
fastai/metrics.py
|
{
"start": 22473,
"end": 23194
}
|
class ____(AvgMetric):
"Create a metric from `loss_func.attr` named `nm`"
def __init__(self, attr, nm=None): store_attr('attr,nm')
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(getattr(learn.loss_func, self.attr, 0))*bs
self.count += bs
@property
def name(self): return self.attr if self.nm is None else self.nm
# %% ../nbs/13b_metrics.ipynb 131
def LossMetrics(attrs, nms=None):
"List of `LossMetric` for each of `attrs` and `nms`"
if isinstance(attrs, str): attrs = attrs.split(',')
nms = attrs if nms is None else nms.split(',') if isinstance(nms, str) else nms
return [LossMetric(a, n) for a,n in zip(attrs,nms)]
|
LossMetric
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py
|
{
"start": 27842,
"end": 36029
}
|
class ____(Qwen3VLMoePreTrainedModel):
config: Qwen3VLMoeVisionConfig
_no_split_modules = ["Qwen3VLMoeVisionBlock"]
def __init__(self, config, *inputs, **kwargs) -> None:
super().__init__(config, *inputs, **kwargs)
self.spatial_merge_size = config.spatial_merge_size
self.patch_size = config.patch_size
self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
self.patch_embed = Qwen3VLMoeVisionPatchEmbed(
config=config,
)
self.pos_embed = nn.Embedding(config.num_position_embeddings, config.hidden_size)
self.num_grid_per_side = int(config.num_position_embeddings**0.5)
head_dim = config.hidden_size // config.num_heads
self.rotary_pos_emb = Qwen3VLMoeVisionRotaryEmbedding(head_dim // 2)
self.blocks = nn.ModuleList([Qwen3VLMoeVisionBlock(config) for _ in range(config.depth)])
self.merger = Qwen3VLMoeVisionPatchMerger(
config=config,
use_postshuffle_norm=False,
)
self.deepstack_visual_indexes = config.deepstack_visual_indexes
self.deepstack_merger_list = nn.ModuleList(
[
Qwen3VLMoeVisionPatchMerger(
config=config,
use_postshuffle_norm=True,
)
for _ in range(len(config.deepstack_visual_indexes))
]
)
self.gradient_checkpointing = False
def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
merge_size = self.spatial_merge_size
max_hw = int(grid_thw[:, 1:].max().item())
freq_table = self.rotary_pos_emb(max_hw) # (max_hw, dim // 2)
device = freq_table.device
total_tokens = int(torch.prod(grid_thw, dim=1).sum().item())
pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)
offset = 0
for num_frames, height, width in grid_thw:
merged_h, merged_w = height // merge_size, width // merge_size
block_rows = torch.arange(merged_h, device=device) # block row indices
block_cols = torch.arange(merged_w, device=device) # block col indices
intra_row = torch.arange(merge_size, device=device) # intra-block row offsets
intra_col = torch.arange(merge_size, device=device) # intra-block col offsets
# Compute full-resolution positions
row_idx = block_rows[:, None, None, None] * merge_size + intra_row[None, None, :, None]
col_idx = block_cols[None, :, None, None] * merge_size + intra_col[None, None, None, :]
row_idx = row_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
col_idx = col_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
coords = torch.stack((row_idx, col_idx), dim=-1)
if num_frames > 1:
coords = coords.repeat(num_frames, 1)
num_tokens = coords.shape[0]
pos_ids[offset : offset + num_tokens] = coords
offset += num_tokens
embeddings = freq_table[pos_ids] # lookup rotary embeddings
embeddings = embeddings.flatten(1)
return embeddings
def fast_pos_embed_interpolate(self, grid_thw):
grid_ts, grid_hs, grid_ws = grid_thw[:, 0], grid_thw[:, 1], grid_thw[:, 2]
device = grid_thw.device
idx_list = [[] for _ in range(4)]
weight_list = [[] for _ in range(4)]
for t, h, w in zip(grid_ts, grid_hs, grid_ws):
h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)
h_idxs_floor = h_idxs.int()
w_idxs_floor = w_idxs.int()
h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
dh = h_idxs - h_idxs_floor
dw = w_idxs - w_idxs_floor
base_h = h_idxs_floor * self.num_grid_per_side
base_h_ceil = h_idxs_ceil * self.num_grid_per_side
indices = [
(base_h[None].T + w_idxs_floor[None]).flatten(),
(base_h[None].T + w_idxs_ceil[None]).flatten(),
(base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
(base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
]
weights = [
((1 - dh)[None].T * (1 - dw)[None]).flatten(),
((1 - dh)[None].T * dw[None]).flatten(),
(dh[None].T * (1 - dw)[None]).flatten(),
(dh[None].T * dw[None]).flatten(),
]
for i in range(4):
idx_list[i].extend(indices[i].tolist())
weight_list[i].extend(weights[i].tolist())
idx_tensor = torch.tensor(idx_list, dtype=torch.long, device=device)
weight_tensor = torch.tensor(weight_list, dtype=self.pos_embed.weight.dtype, device=device)
pos_embeds = self.pos_embed(idx_tensor).to(device) * weight_tensor[:, :, None]
patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]
patch_pos_embeds = patch_pos_embeds.split([h * w for h, w in zip(grid_hs, grid_ws)])
patch_pos_embeds_permute = []
merge_size = self.config.spatial_merge_size
for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
pos_embed = pos_embed.repeat(t, 1)
pos_embed = (
pos_embed.view(t, h // merge_size, merge_size, w // merge_size, merge_size, -1)
.permute(0, 1, 3, 2, 4, 5)
.flatten(0, 4)
)
patch_pos_embeds_permute.append(pos_embed)
patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
return patch_pos_embeds
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Args:
hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
The final hidden states of the model.
grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
The temporal, height and width of feature shape of each image in LLM.
Returns:
`torch.Tensor`: hidden_states.
"""
hidden_states = self.patch_embed(hidden_states)
pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
hidden_states = hidden_states + pos_embeds
rotary_pos_emb = self.rot_pos_emb(grid_thw)
seq_len, _ = hidden_states.size()
hidden_states = hidden_states.reshape(seq_len, -1)
rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
dim=0,
# Select dtype based on the following factors:
# - FA2 requires that cu_seqlens_q must have dtype int32
# - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
# See https://github.com/huggingface/transformers/pull/34852 for more information
dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
deepstack_feature_lists = []
for layer_num, blk in enumerate(self.blocks):
hidden_states = blk(
hidden_states,
cu_seqlens=cu_seqlens,
position_embeddings=position_embeddings,
**kwargs,
)
if layer_num in self.deepstack_visual_indexes:
deepstack_feature = self.deepstack_merger_list[self.deepstack_visual_indexes.index(layer_num)](
hidden_states
)
deepstack_feature_lists.append(deepstack_feature)
hidden_states = self.merger(hidden_states)
return hidden_states, deepstack_feature_lists
|
Qwen3VLMoeVisionModel
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/suite/test_types.py
|
{
"start": 23633,
"end": 23980
}
|
class ____(_DateFixture, fixtures.TablesTest):
__requires__ = ("date",)
__backend__ = True
datatype = Date
data = datetime.date(2012, 10, 15)
@testing.requires.date_implicit_bound
def test_select_direct(self, connection):
result = connection.scalar(select(literal(self.data)))
eq_(result, self.data)
|
DateTest
|
python
|
getsentry__sentry
|
src/sentry/incidents/logic.py
|
{
"start": 10854,
"end": 11011
}
|
class ____(BaseMetricIssueQueryParams):
start_arg: datetime | None = None
end_arg: datetime | None = None
@dataclass
|
CalculateOpenPeriodTimeRangeParams
|
python
|
pytorch__pytorch
|
torch/fx/experimental/symbolic_shapes.py
|
{
"start": 81118,
"end": 83000
}
|
class ____(StatelessSymbolicContext):
"""
Create symbols in ``create_symbolic_sizes_strides_storage_offset`` via
a symbolic_context determination as given by a cache of Source:Symbol. A cache hit
will reuse a stored symbol, and a cache miss will write to this cache.
This behaves like StatelessSymbolicContext, except the cache supersedes the
other values - dynamic_sizes and constraint_sizes will not be read if we cache
hit.
It is the cache owner's responsibility to maintain the lifecycle of the cache
with respect to different shape_envs, clearing, etc.
"""
tensor_source: Source = None # type: ignore[assignment]
# Why is this keyed on int first?
# That integer is actually the id of the shape_env. This cache short-circuits symbol
# creation, and we must store it per shape env. Now, while tracing invariants are a single
# shape env per tracing context, and every new frame gets a new shape_env. So where would we have
# multiple shape envs? The answer lies in recording. When we are replaying, replay_shape_env_events
# is invoked, and creates a new shape_env. Replaying events against this new shape_env will
# cause it to fail with unknown symbols, as the symbols cached here will skip creation, and never
# get recorded in var_to_val, etc.
# TODO(voz): consider a weakref to the shape_env here
shape_env_to_source_to_symbol_cache: dict[int, dict[str, sympy.Expr]] = None # type: ignore[assignment]
def __post_init__(self) -> None:
super().__post_init__()
# The None default is annoying, but required because of dataclass limitations
assert self.tensor_source is not None
if not self.shape_env_to_source_to_symbol_cache:
object.__setattr__(self, "shape_env_to_source_to_symbol_cache", {})
@dataclass(frozen=True)
|
StatefulSymbolicContext
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/fixtures/base.py
|
{
"start": 729,
"end": 12127
}
|
class ____:
# A sequence of requirement names matching testing.requires decorators
__requires__ = ()
# A sequence of dialect names to exclude from the test class.
__unsupported_on__ = ()
# If present, test class is only runnable for the *single* specified
# dialect. If you need multiple, use __unsupported_on__ and invert.
__only_on__ = None
# A sequence of no-arg callables. If any are True, the entire testcase is
# skipped.
__skip_if__ = None
# if True, the testing reaper will not attempt to touch connection
# state after a test is completed and before the outer teardown
# starts
__leave_connections_for_teardown__ = False
def assert_(self, val, msg=None):
assert val, msg
@config.fixture()
def nocache(self):
_cache = config.db._compiled_cache
config.db._compiled_cache = None
yield
config.db._compiled_cache = _cache
@config.fixture()
def connection_no_trans(self):
eng = getattr(self, "bind", None) or config.db
with eng.connect() as conn:
yield conn
@config.fixture()
def connection(self):
global _connection_fixture_connection
eng = getattr(self, "bind", None) or config.db
conn = eng.connect()
trans = conn.begin()
_connection_fixture_connection = conn
yield conn
_connection_fixture_connection = None
if trans.is_active:
trans.rollback()
# trans would not be active here if the test is using
# the legacy @provide_metadata decorator still, as it will
# run a close all connections.
conn.close()
@config.fixture()
def close_result_when_finished(self):
to_close = []
to_consume = []
def go(result, consume=False):
to_close.append(result)
if consume:
to_consume.append(result)
yield go
for r in to_consume:
try:
r.all()
except:
pass
for r in to_close:
try:
r.close()
except:
pass
@config.fixture()
def registry(self, metadata):
reg = registry(
metadata=metadata,
type_annotation_map={
str: sa.String().with_variant(
sa.String(50), "mysql", "mariadb", "oracle"
)
},
)
yield reg
reg.dispose()
@config.fixture
def decl_base(self, metadata):
_md = metadata
class Base(DeclarativeBase):
metadata = _md
type_annotation_map = {
str: sa.String().with_variant(
sa.String(50), "mysql", "mariadb", "oracle"
)
}
yield Base
Base.registry.dispose()
@config.fixture
def dc_decl_base(self, metadata):
_md = metadata
class Base(MappedAsDataclass, DeclarativeBase):
metadata = _md
type_annotation_map = {
str: sa.String().with_variant(
sa.String(50), "mysql", "mariadb"
)
}
yield Base
Base.registry.dispose()
@config.fixture()
def future_connection(self, future_engine, connection):
# integrate the future_engine and connection fixtures so
# that users of the "connection" fixture will get at the
# "future" connection
yield connection
@config.fixture()
def future_engine(self):
yield
@config.fixture()
def testing_engine(self):
from .. import engines
def gen_testing_engine(
url=None,
options=None,
asyncio=False,
):
if options is None:
options = {}
options["scope"] = "fixture"
return engines.testing_engine(
url=url,
options=options,
asyncio=asyncio,
)
yield gen_testing_engine
engines.testing_reaper._drop_testing_engines("fixture")
@config.fixture()
def async_testing_engine(self, testing_engine):
def go(**kw):
kw["asyncio"] = True
return testing_engine(**kw)
return go
@config.fixture(params=picklers())
def picklers(self, request):
yield request.param
@config.fixture()
def metadata(self, request):
"""Provide bound MetaData for a single test, dropping afterwards."""
from ...sql import schema
metadata = schema.MetaData()
request.instance.metadata = metadata
yield metadata
del request.instance.metadata
if (
_connection_fixture_connection
and _connection_fixture_connection.in_transaction()
):
trans = _connection_fixture_connection.get_transaction()
trans.rollback()
with _connection_fixture_connection.begin():
drop_all_tables_from_metadata(
metadata, _connection_fixture_connection
)
else:
drop_all_tables_from_metadata(metadata, config.db)
@config.fixture(
params=[
(rollback, second_operation, begin_nested)
for rollback in (True, False)
for second_operation in ("none", "execute", "begin")
for begin_nested in (
True,
False,
)
]
)
def trans_ctx_manager_fixture(self, request, metadata):
rollback, second_operation, begin_nested = request.param
t = Table("test", metadata, Column("data", Integer))
eng = getattr(self, "bind", None) or config.db
t.create(eng)
def run_test(subject, trans_on_subject, execute_on_subject):
with subject.begin() as trans:
if begin_nested:
if not config.requirements.savepoints.enabled:
config.skip_test("savepoints not enabled")
if execute_on_subject:
nested_trans = subject.begin_nested()
else:
nested_trans = trans.begin_nested()
with nested_trans:
if execute_on_subject:
subject.execute(t.insert(), {"data": 10})
else:
trans.execute(t.insert(), {"data": 10})
# for nested trans, we always commit/rollback on the
# "nested trans" object itself.
# only Session(future=False) will affect savepoint
# transaction for session.commit/rollback
if rollback:
nested_trans.rollback()
else:
nested_trans.commit()
if second_operation != "none":
with assertions.expect_raises_message(
sa.exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context "
"manager. Please complete the context "
"manager "
"before emitting further commands.",
):
if second_operation == "execute":
if execute_on_subject:
subject.execute(
t.insert(), {"data": 12}
)
else:
trans.execute(t.insert(), {"data": 12})
elif second_operation == "begin":
if execute_on_subject:
subject.begin_nested()
else:
trans.begin_nested()
# outside the nested trans block, but still inside the
# transaction block, we can run SQL, and it will be
# committed
if execute_on_subject:
subject.execute(t.insert(), {"data": 14})
else:
trans.execute(t.insert(), {"data": 14})
else:
if execute_on_subject:
subject.execute(t.insert(), {"data": 10})
else:
trans.execute(t.insert(), {"data": 10})
if trans_on_subject:
if rollback:
subject.rollback()
else:
subject.commit()
else:
if rollback:
trans.rollback()
else:
trans.commit()
if second_operation != "none":
with assertions.expect_raises_message(
sa.exc.InvalidRequestError,
"Can't operate on closed transaction inside "
"context "
"manager. Please complete the context manager "
"before emitting further commands.",
):
if second_operation == "execute":
if execute_on_subject:
subject.execute(t.insert(), {"data": 12})
else:
trans.execute(t.insert(), {"data": 12})
elif second_operation == "begin":
if hasattr(trans, "begin"):
trans.begin()
else:
subject.begin()
elif second_operation == "begin_nested":
if execute_on_subject:
subject.begin_nested()
else:
trans.begin_nested()
expected_committed = 0
if begin_nested:
# begin_nested variant, we inserted a row after the nested
# block
expected_committed += 1
if not rollback:
# not rollback variant, our row inserted in the target
# block itself would be committed
expected_committed += 1
if execute_on_subject:
eq_(
subject.scalar(select(func.count()).select_from(t)),
expected_committed,
)
else:
with subject.connect() as conn:
eq_(
conn.scalar(select(func.count()).select_from(t)),
expected_committed,
)
return run_test
_connection_fixture_connection = None
|
TestBase
|
python
|
django__django
|
django/db/models/functions/text.py
|
{
"start": 7898,
"end": 8086
}
|
class ____(Func):
function = "REPLACE"
def __init__(self, expression, text, replacement=Value(""), **extra):
super().__init__(expression, text, replacement, **extra)
|
Replace
|
python
|
huggingface__transformers
|
tests/models/hiera/test_modeling_hiera.py
|
{
"start": 22106,
"end": 26112
}
|
class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-in1k-hf") if is_vision_available() else None
def test_inference_image_classification_head(self):
model = HieraForImageClassification.from_pretrained("facebook/hiera-tiny-224-in1k-hf").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
expected_pixel_values = torch.tensor(
[
[[0.2967, 0.4679, 0.4508], [0.3309, 0.4337, 0.3309], [0.3309, 0.3823, 0.3309]],
[[-1.5455, -1.4930, -1.5455], [-1.5280, -1.4755, -1.5980], [-1.5630, -1.5280, -1.4755]],
[[-0.6367, -0.4973, -0.5321], [-0.7936, -0.6715, -0.6715], [-0.8284, -0.7413, -0.5670]],
]
).to(torch_device)
torch.testing.assert_close(inputs.pixel_values[0, :3, :3, :3], expected_pixel_values, rtol=1e-4, atol=1e-4)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([[0.8028, 0.2409, -0.2254, -0.3712, -0.2848]]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_interpolate_pos_encoding(self):
model = HieraModel.from_pretrained("facebook/hiera-tiny-224-hf").to(torch_device)
image_processor = AutoImageProcessor.from_pretrained(
"facebook/hiera-tiny-224-hf", size={"shortest_edge": 448}, crop_size={"height": 448, "width": 448}
)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(pixel_values, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 196, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[1.7853, 0.0690, 0.3177], [2.6853, -0.2334, 0.0889], [1.5445, -0.1515, -0.0300]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_for_pretraining(self):
# make random mask reproducible
torch.manual_seed(2)
model = HieraForPreTraining.from_pretrained("facebook/hiera-tiny-224-mae-hf").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
config = model.config
mask_spatial_shape = [
i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size)
]
num_windows = math.prod(mask_spatial_shape)
noise = torch.rand(1, num_windows).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs, noise=noise)
# verify the logits
expected_shape = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[
[1.6407, 1.6506, 1.6541, 1.6617, 1.6703],
[1.9730, 1.9842, 1.9848, 1.9896, 1.9947],
[1.5949, 1.8262, 1.2602, 1.4801, 1.4448],
[1.2341, 1.7907, 0.8618, 1.5202, 1.4523],
[2.0140, 1.9846, 1.9434, 1.9019, 1.8648],
]
)
torch.testing.assert_close(outputs.logits[0, :5, :5], expected_slice.to(torch_device), rtol=1e-4, atol=1e-4)
@require_torch
|
HieraModelIntegrationTest
|
python
|
django__django
|
django/contrib/gis/db/backends/mysql/operations.py
|
{
"start": 541,
"end": 5174
}
|
class ____(BaseSpatialOperations, DatabaseOperations):
name = "mysql"
geom_func_prefix = "ST_"
Adapter = WKTAdapter
@cached_property
def mariadb(self):
return self.connection.mysql_is_mariadb
@cached_property
def mysql(self):
return not self.connection.mysql_is_mariadb
@cached_property
def select(self):
return self.geom_func_prefix + "AsBinary(%s)"
@cached_property
def from_text(self):
return self.geom_func_prefix + "GeomFromText"
@cached_property
def collect(self):
if self.connection.features.supports_collect_aggr:
return self.geom_func_prefix + "Collect"
@cached_property
def gis_operators(self):
operators = {
"bbcontains": SpatialOperator(
func="MBRContains"
), # For consistency w/PostGIS API
"bboverlaps": SpatialOperator(func="MBROverlaps"), # ...
"contained": SpatialOperator(func="MBRWithin"), # ...
"contains": SpatialOperator(func="ST_Contains"),
"coveredby": SpatialOperator(func="MBRCoveredBy"),
"crosses": SpatialOperator(func="ST_Crosses"),
"disjoint": SpatialOperator(func="ST_Disjoint"),
"equals": SpatialOperator(func="ST_Equals"),
"exact": SpatialOperator(func="ST_Equals"),
"intersects": SpatialOperator(func="ST_Intersects"),
"overlaps": SpatialOperator(func="ST_Overlaps"),
"same_as": SpatialOperator(func="ST_Equals"),
"touches": SpatialOperator(func="ST_Touches"),
"within": SpatialOperator(func="ST_Within"),
}
if self.connection.mysql_is_mariadb:
operators["relate"] = SpatialOperator(func="ST_Relate")
if self.connection.mysql_version < (12, 0, 1):
del operators["coveredby"]
else:
operators["covers"] = SpatialOperator(func="MBRCovers")
return operators
@cached_property
def disallowed_aggregates(self):
disallowed_aggregates = [
models.Extent,
models.Extent3D,
models.MakeLine,
models.Union,
]
is_mariadb = self.connection.mysql_is_mariadb
if is_mariadb:
if self.connection.mysql_version < (12, 0, 1):
disallowed_aggregates.insert(0, models.Collect)
return tuple(disallowed_aggregates)
function_names = {
"FromWKB": "ST_GeomFromWKB",
"FromWKT": "ST_GeomFromText",
}
@cached_property
def unsupported_functions(self):
unsupported = {
"AsGML",
"AsKML",
"AsSVG",
"Azimuth",
"BoundingCircle",
"ClosestPoint",
"ForcePolygonCW",
"GeometryDistance",
"IsEmpty",
"LineLocatePoint",
"MakeValid",
"MemSize",
"NumDimensions",
"Perimeter",
"PointOnSurface",
"Reverse",
"Rotate",
"Scale",
"SnapToGrid",
"Transform",
"Translate",
}
if self.connection.mysql_is_mariadb:
unsupported.remove("PointOnSurface")
if self.connection.mysql_version < (12, 0, 1):
unsupported.update({"GeoHash", "IsValid"})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_distance(self, f, value, lookup_type):
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError(
"Only numeric values of degree units are allowed on "
"geodetic distance queries."
)
dist_param = getattr(
value, Distance.unit_attname(f.units_name(self.connection))
)
else:
dist_param = value
return [dist_param]
def get_geometry_converter(self, expression):
read = wkb_r().read
srid = expression.output_field.srid
if srid == -1:
srid = None
geom_class = expression.output_field.geom_class
def converter(value, expression, connection):
if value is not None:
geom = GEOSGeometryBase(read(memoryview(value)), geom_class)
if srid:
geom.srid = srid
return geom
return converter
def spatial_aggregate_name(self, agg_name):
return getattr(self, agg_name.lower())
|
MySQLOperations
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-zero-request-servers.py
|
{
"start": 66,
"end": 1016
}
|
class ____(object):
def countServers(self, n, logs, x, queries):
"""
:type n: int
:type logs: List[List[int]]
:type x: int
:type queries: List[int]
:rtype: List[int]
"""
logs.sort(key=lambda x:x[1])
result = [0]*len(queries)
cnt = [0]*n
curr = left = right = 0
for t, i in sorted((t, i) for i, t in enumerate(queries)):
while right < len(logs) and logs[right][1] <= t:
if cnt[logs[right][0]-1] == 0:
curr += 1
cnt[logs[right][0]-1] += 1
right += 1
while left < right and logs[left][1] < t-x:
cnt[logs[left][0]-1] -= 1
if cnt[logs[left][0]-1] == 0:
curr -= 1
left += 1
result[i] = n-curr
return result
# Time: O(nlogn + mlogm)
# Space: O(n + m)
# sort, line sweep
|
Solution
|
python
|
numba__numba
|
numba/cuda/simulator/kernel.py
|
{
"start": 991,
"end": 1305
}
|
class ____:
'''
Used only to provide the max_cooperative_grid_blocks method
'''
def max_cooperative_grid_blocks(self, blockdim):
# We can only run one block in a cooperative grid because we have no
# mechanism for synchronization between different blocks
return 1
|
FakeOverload
|
python
|
astropy__astropy
|
astropy/utils/masked/tests/test_function_helpers.py
|
{
"start": 1860,
"end": 2079
}
|
class ____(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
|
NoMaskTestSetup
|
python
|
ApeWorX__ape
|
tests/conftest.py
|
{
"start": 14151,
"end": 15131
}
|
class ____(SubprocessRunner):
"""
Subprocess runner for Ape-specific commands.
"""
def __init__(
self,
root_cmd: Optional[Union[str, Sequence[str]]] = None,
data_folder: Optional[Path] = None,
):
ape_path = Path(sys.executable).parent / "ape"
root = root_cmd or ()
if isinstance(root, str):
root = (root,)
super().__init__([str(ape_path), *root], data_folder=data_folder)
self.project = None
def invoke(self, *subcommand: str, input=None, timeout: int = 40, env: Optional[dict] = None):
if self.project:
try:
here = os.getcwd()
except Exception:
here = None
os.chdir(f"{self.project.path}")
else:
here = None
result = super().invoke(*subcommand, input=input, timeout=timeout, env=env)
if here:
os.chdir(here)
return result
|
ApeSubprocessRunner
|
python
|
apache__airflow
|
providers/fab/src/airflow/providers/fab/auth_manager/schemas/role_and_permission_schema.py
|
{
"start": 1637,
"end": 1907
}
|
class ____(SQLAlchemySchema):
"""Action View Schema."""
class Meta:
"""Meta."""
model = Permission
action = fields.Nested(ActionSchema, data_key="action")
resource = fields.Nested(ResourceSchema, data_key="resource")
|
ActionResourceSchema
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/bedrock.py
|
{
"start": 2533,
"end": 3964
}
|
class ____(AwsBaseWaiterTrigger):
"""
Trigger when a Bedrock Knowledge Base reaches the ACTIVE state.
:param knowledge_base_id: The unique identifier of the knowledge base for which to get information.
:param waiter_delay: The amount of time in seconds to wait between attempts. (default: 5)
:param waiter_max_attempts: The maximum number of attempts to be made. (default: 24)
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
*,
knowledge_base_id: str,
waiter_delay: int = 5,
waiter_max_attempts: int = 24,
aws_conn_id: str | None = None,
) -> None:
super().__init__(
serialized_fields={"knowledge_base_id": knowledge_base_id},
waiter_name="knowledge_base_active",
waiter_args={"knowledgeBaseId": knowledge_base_id},
failure_message="Bedrock Knowledge Base creation failed.",
status_message="Status of Bedrock Knowledge Base job is",
status_queries=["status"],
return_key="knowledge_base_id",
return_value=knowledge_base_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return BedrockAgentHook(aws_conn_id=self.aws_conn_id)
|
BedrockKnowledgeBaseActiveTrigger
|
python
|
scikit-learn__scikit-learn
|
sklearn/compose/tests/test_target.py
|
{
"start": 10907,
"end": 11722
}
|
class ____(TransformerMixin, BaseEstimator):
"""Dummy transformer which count how many time fit was called."""
def __init__(self, fit_counter=0):
self.fit_counter = fit_counter
def fit(self, X, y=None):
self.fit_counter += 1
return self
def transform(self, X):
return X
def inverse_transform(self, X):
return X
@pytest.mark.parametrize("check_inverse", [False, True])
def test_transform_target_regressor_count_fit(check_inverse):
# regression test for gh-issue #11618
# check that we only call a single time fit for the transformer
X, y = friedman
ttr = TransformedTargetRegressor(
transformer=DummyTransformer(), check_inverse=check_inverse
)
ttr.fit(X, y)
assert ttr.transformer_.fit_counter == 1
|
DummyTransformer
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/keyword_table/retrievers.py
|
{
"start": 4539,
"end": 6207
}
|
class ____(BaseKeywordTableRetriever):
"""
Keyword Table Index GPT Retriever.
Extracts keywords using GPT. Set when using `retriever_mode="default"`.
See BaseGPTKeywordTableQuery for arguments.
"""
def __init__(
self,
index: BaseKeywordTableIndex,
keyword_extract_template: Optional[BasePromptTemplate] = None,
query_keyword_extract_template: Optional[BasePromptTemplate] = None,
max_keywords_per_query: int = 10,
num_chunks_per_query: int = 10,
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._llm = llm or Settings.llm
super().__init__(
index=index,
keyword_extract_template=keyword_extract_template,
query_keyword_extract_template=query_keyword_extract_template,
max_keywords_per_query=max_keywords_per_query,
num_chunks_per_query=num_chunks_per_query,
callback_manager=callback_manager or Settings.callback_manager,
object_map=object_map,
verbose=verbose,
)
def _get_keywords(self, query_str: str) -> List[str]:
"""Extract keywords."""
response = self._llm.predict(
self.query_keyword_extract_template,
max_keywords=self.max_keywords_per_query,
question=query_str,
)
keywords = extract_keywords_given_response(response, start_token="KEYWORDS:")
return list(keywords)
|
KeywordTableGPTRetriever
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/autograd_cache.py
|
{
"start": 20541,
"end": 20784
}
|
class ____(CacheArtifact):
@override
def populate_cache(self):
AOTAutogradCache._write_to_local_cache(self.key, self.content)
@override
@staticmethod
def type():
return "aot_autograd"
|
AOTAutogradCacheArtifact
|
python
|
readthedocs__readthedocs.org
|
readthedocs/integrations/models.py
|
{
"start": 11979,
"end": 14023
}
|
class ____(Integration):
"""
Dummy integration for GitHub App projects.
This is a proxy model for the Integration model, which is used to
represent GitHub App integrations in the UI.
This integration is automatically created when a project is linked to a
remote repository from a GitHub App installation, and it remains
associated with the project even if the remote repository is removed.
The `provider_data` field is a JSON representation of the `GitHubAppIntegrationProviderData` class.
"""
integration_type_id = Integration.GITHUBAPP
has_sync = False
is_remote_only = True
class Meta:
proxy = True
def get_absolute_url(self) -> str | None:
"""
Get URL of the GHA installation page.
Instead of showing a link to the integration details page, for GHA
projects we show a link in the UI to the GHA installation page for the
installation used by the project.
"""
# If the GHA is disconnected we'll disonnect the remote repository and
# so we won't have a URL to the installation page the project should be
# using. We might want to store this on the model later so a repository
# that is removed from the installation can still link to the
# installation the project was _previously_ using.
if self.project.is_github_app_project:
return self.project.remote_repository.github_app_installation.url
return None
@property
def is_active(self) -> bool:
"""
Is the GHA connection active for this project?
This assumes that the status of the GHA connect will be reflected as
soon as there is an event that might disconnect the GHA on GitHub's
side -- uninstalling the app or revoking permission to the repository.
We listen for these events and should disconnect the remote
repository, but would leave this integration.
"""
return self.project.is_github_app_project
|
GitHubAppIntegration
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/communicator.py
|
{
"start": 379,
"end": 1887
}
|
class ____:
def __init__(self, worker_id=0, base_port=5005):
"""
Python side of the communication. Must be used in pair with the right Unity Communicator equivalent.
:int worker_id: Offset from base_port. Used for training multiple environments simultaneously.
:int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this.
"""
def initialize(
self, inputs: UnityInputProto, poll_callback: Optional[PollCallback] = None
) -> UnityOutputProto:
"""
Used to exchange initialization parameters between Python and the Environment
:param inputs: The initialization input that will be sent to the environment.
:param poll_callback: Optional callback to be used while polling the connection.
:return: UnityOutput: The initialization output sent by Unity
"""
def exchange(
self, inputs: UnityInputProto, poll_callback: Optional[PollCallback] = None
) -> Optional[UnityOutputProto]:
"""
Used to send an input and receive an output from the Environment
:param inputs: The UnityInput that needs to be sent the Environment
:param poll_callback: Optional callback to be used while polling the connection.
:return: The UnityOutputs generated by the Environment
"""
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the connection.
"""
|
Communicator
|
python
|
scrapy__scrapy
|
tests/test_command_crawl.py
|
{
"start": 3178,
"end": 3650
}
|
class ____(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('It works!')
return
yield
"""
log = self.get_log(spider_code, proj_path, args=("-s", "TWISTED_REACTOR="))
assert "[myspider] DEBUG: It works!" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
not in log
)
assert "Spider closed (finished)" in log
|
MySpider
|
python
|
pypa__warehouse
|
warehouse/config.py
|
{
"start": 1580,
"end": 36143
}
|
class ____:
__parent__ = None
__name__ = None
__acl__ = [
(
Allow,
"group:admins",
(
Permissions.AdminBannerRead,
Permissions.AdminBannerWrite,
Permissions.AdminDashboardRead,
Permissions.AdminDashboardSidebarRead,
Permissions.AdminEmailsRead,
Permissions.AdminEmailsWrite,
Permissions.AdminFlagsRead,
Permissions.AdminFlagsWrite,
Permissions.AdminIpAddressesRead,
Permissions.AdminJournalRead,
Permissions.AdminMacaroonsRead,
Permissions.AdminMacaroonsWrite,
Permissions.AdminObservationsRead,
Permissions.AdminObservationsWrite,
Permissions.AdminOrganizationsRead,
Permissions.AdminOrganizationsSetLimit,
Permissions.AdminOrganizationsWrite,
Permissions.AdminOrganizationsNameWrite,
Permissions.AdminProhibitedEmailDomainsRead,
Permissions.AdminProhibitedEmailDomainsWrite,
Permissions.AdminProhibitedProjectsRead,
Permissions.AdminProhibitedProjectsWrite,
Permissions.AdminProhibitedProjectsRelease,
Permissions.AdminProhibitedUsernameRead,
Permissions.AdminProhibitedUsernameWrite,
Permissions.AdminProjectsDelete,
Permissions.AdminProjectsRead,
Permissions.AdminProjectsSetLimit,
Permissions.AdminProjectsWrite,
Permissions.AdminRoleAdd,
Permissions.AdminRoleDelete,
Permissions.AdminRoleUpdate,
Permissions.AdminSponsorsRead,
Permissions.AdminUsersRead,
Permissions.AdminUsersWrite,
Permissions.AdminUsersEmailWrite,
Permissions.AdminUsersAccountRecoveryWrite,
),
),
(
Allow,
"group:support",
(
Permissions.AdminBannerRead,
Permissions.AdminDashboardRead,
Permissions.AdminDashboardSidebarRead,
Permissions.AdminEmailsRead,
Permissions.AdminFlagsRead,
Permissions.AdminJournalRead,
Permissions.AdminObservationsRead,
Permissions.AdminObservationsWrite,
Permissions.AdminOrganizationsRead,
Permissions.AdminOrganizationsSetLimit,
Permissions.AdminOrganizationsWrite,
Permissions.AdminOrganizationsNameWrite,
Permissions.AdminProhibitedEmailDomainsRead,
Permissions.AdminProhibitedProjectsRead,
Permissions.AdminProhibitedProjectsRelease,
Permissions.AdminProhibitedUsernameRead,
Permissions.AdminProjectsRead,
Permissions.AdminProjectsSetLimit,
Permissions.AdminRoleAdd,
Permissions.AdminRoleDelete,
Permissions.AdminRoleUpdate,
Permissions.AdminSponsorsRead,
Permissions.AdminUsersRead,
Permissions.AdminUsersEmailWrite,
Permissions.AdminUsersAccountRecoveryWrite,
),
),
(
Allow,
"group:moderators",
(
Permissions.AdminBannerRead,
Permissions.AdminDashboardRead,
Permissions.AdminDashboardSidebarRead,
Permissions.AdminEmailsRead,
Permissions.AdminFlagsRead,
Permissions.AdminJournalRead,
Permissions.AdminObservationsRead,
Permissions.AdminObservationsWrite,
Permissions.AdminOrganizationsRead,
Permissions.AdminProhibitedEmailDomainsRead,
Permissions.AdminProhibitedProjectsRead,
Permissions.AdminProhibitedUsernameRead,
Permissions.AdminProjectsRead,
Permissions.AdminProjectsSetLimit,
Permissions.AdminRoleAdd,
Permissions.AdminRoleDelete,
Permissions.AdminRoleUpdate,
Permissions.AdminSponsorsRead,
Permissions.AdminUsersRead,
),
),
(
Allow,
"group:psf_staff",
(
Permissions.AdminBannerRead,
Permissions.AdminBannerWrite,
Permissions.AdminDashboardRead,
Permissions.AdminSponsorsRead,
Permissions.AdminSponsorsWrite,
),
),
(
Allow,
"group:observers",
(
Permissions.APIEcho,
Permissions.APIObservationsAdd,
),
),
(
Allow,
Authenticated,
(
Permissions.Account2FA,
Permissions.AccountAPITokens,
Permissions.AccountManage,
Permissions.AccountManagePublishing,
Permissions.AccountVerifyEmail,
Permissions.AccountVerifyOrgRole,
Permissions.AccountVerifyProjectRole,
Permissions.OrganizationsManage,
Permissions.ProjectsRead,
),
),
]
def __init__(self, request):
pass
def require_https_tween_factory(handler, registry):
if not registry.settings.get("enforce_https", True):
return handler
def require_https_tween(request):
# If we have an :action URL and we're not using HTTPS, then we want to
# return a 403 error.
if request.params.get(":action", None) and request.scheme != "https":
resp = HTTPForbidden(body="SSL is required.", content_type="text/plain")
resp.status = "403 SSL is required"
resp.headers["X-Fastly-Error"] = "803"
return resp
return handler(request)
return require_https_tween
def activate_hook(request):
if request.path.startswith(("/_debug_toolbar/", "/static/")):
return False
return True
def template_view(config, name, route, template, route_kw=None, view_kw=None):
if route_kw is None:
route_kw = {}
if view_kw is None:
view_kw = {}
config.add_route(name, route, **route_kw)
config.add_view(renderer=template, route_name=name, **view_kw)
def maybe_set(settings, name, envvar, coercer=None, default=None):
if envvar in os.environ:
value = os.environ[envvar]
if coercer is not None:
value = coercer(value)
settings.setdefault(name, value)
elif default is not None:
settings.setdefault(name, default)
def maybe_set_compound(settings, base, name, envvar):
if envvar in os.environ:
value = shlex.split(os.environ[envvar])
kwargs = {k: v for k, v in (i.split("=") for i in value[1:])}
settings[".".join([base, name])] = value[0]
for key, value in kwargs.items():
settings[".".join([base, key])] = value
def maybe_set_redis(settings, name, envvar, coercer=None, default=None, db=None):
"""
Note on our DB numbering:
- General purpose caches and temporary storage should go in 1-9
- Celery queues, results, and schedulers should use 10-15
- By default Redis only allows use of 0-15, so db should be <16
"""
if envvar in os.environ:
value = os.environ[envvar]
if coercer is not None:
value = coercer(value)
parsed_url = urlparse(value) # noqa: WH001, we're going to urlunparse this
parsed_url = parsed_url._replace(path=(str(db) if db is not None else "0"))
value = urlunparse(parsed_url)
settings.setdefault(name, value)
elif default is not None:
settings.setdefault(name, default)
def from_base64_encoded_json(configuration):
return json.loads(base64.urlsafe_b64decode(configuration.encode("ascii")))
def reject_duplicate_post_keys_view(view, info):
if info.options.get("permit_duplicate_post_keys") or info.exception_only:
return view
else:
# If this isn't an exception or hasn't been permitted to have duplicate
# POST keys, wrap the view with a check
@functools.wraps(view)
def wrapped(context, request):
if request.POST:
# Determine if there are any duplicate keys
keys = list(request.POST.keys())
if len(keys) != len(set(keys)):
return HTTPBadRequest(
"POST body may not contain duplicate keys "
f"(URL: {request.url!r})"
)
# Casting succeeded, so just return the regular view
return view(context, request)
return wrapped
reject_duplicate_post_keys_view.options = {"permit_duplicate_post_keys"} # type: ignore
def configure(settings=None):
# Sanity check: regardless of what we're configuring, some of Warehouse's
# application state depends on a handful of XDG directories existing.
platformdirs.user_data_dir(appname=secrets.token_urlsafe(), ensure_exists=True)
platformdirs.user_cache_dir(appname=secrets.token_urlsafe(), ensure_exists=True)
if settings is None:
settings = {}
settings["warehouse.forklift.legacy.MAX_FILESIZE_MIB"] = MAX_FILESIZE / ONE_MIB
settings["warehouse.forklift.legacy.MAX_PROJECT_SIZE_GIB"] = (
MAX_PROJECT_SIZE / ONE_GIB
)
# Allow configuring the log level. See `warehouse/logging.py` for more
maybe_set(settings, "logging.level", "LOG_LEVEL")
# Add information about the current copy of the code.
maybe_set(settings, "warehouse.commit", "SOURCE_COMMIT", default="null")
# Set the environment from an environment variable, if one hasn't already
# been set.
maybe_set(
settings,
"warehouse.env",
"WAREHOUSE_ENV",
Environment,
default=Environment.production,
)
maybe_set(
settings,
"terms.revision",
"TERMS_REVISION",
default="initial",
)
maybe_set(
settings,
"terms.notification_batch_size",
"TERMS_NOTIFICATION_BATCH_SIZE",
int,
default=1000,
)
# Pull in default configuration from the environment.
maybe_set(settings, "warehouse.token", "WAREHOUSE_TOKEN")
maybe_set(settings, "warehouse.ip_salt", "WAREHOUSE_IP_SALT")
maybe_set(settings, "warehouse.num_proxies", "WAREHOUSE_NUM_PROXIES", int)
maybe_set(settings, "warehouse.domain", "WAREHOUSE_DOMAIN")
maybe_set(
settings,
"warehouse.allowed_domains",
"WAREHOUSE_ALLOWED_DOMAINS",
lambda s: [d.strip() for d in s.split(",") if d.strip()],
default=[],
)
maybe_set(settings, "forklift.domain", "FORKLIFT_DOMAIN")
maybe_set(settings, "auth.domain", "AUTH_DOMAIN")
maybe_set(
settings, "userdocs.domain", "USERDOCS_DOMAIN", default="https://docs.pypi.org"
)
maybe_set(settings, "warehouse.legacy_domain", "WAREHOUSE_LEGACY_DOMAIN")
maybe_set(settings, "site.name", "SITE_NAME", default="Warehouse")
maybe_set(settings, "aws.key_id", "AWS_ACCESS_KEY_ID")
maybe_set(settings, "aws.secret_key", "AWS_SECRET_ACCESS_KEY")
maybe_set(settings, "aws.region", "AWS_REGION")
maybe_set(settings, "b2.application_key_id", "B2_APPLICATION_KEY_ID")
maybe_set(settings, "b2.application_key", "B2_APPLICATION_KEY")
maybe_set(settings, "gcloud.project", "GCLOUD_PROJECT")
maybe_set(
settings,
"gcloud.service_account_info",
"GCLOUD_SERVICE_JSON",
from_base64_encoded_json,
)
maybe_set(
settings, "warehouse.release_files_table", "WAREHOUSE_RELEASE_FILES_TABLE"
)
maybe_set(settings, "github.token", "GITHUB_TOKEN")
maybe_set(
settings,
"github.token_scanning_meta_api.url",
"GITHUB_TOKEN_SCANNING_META_API_URL",
default="https://api.github.com/meta/public_keys/token_scanning",
)
maybe_set(settings, "warehouse.downloads_table", "WAREHOUSE_DOWNLOADS_TABLE")
maybe_set_redis(settings, "celery.broker_redis_url", "REDIS_URL", db=10)
maybe_set_redis(settings, "celery.result_url", "REDIS_URL", db=12)
maybe_set_redis(settings, "celery.scheduler_url", "REDIS_URL", db=0)
maybe_set_redis(settings, "oidc.jwk_cache_url", "REDIS_URL", db=1)
maybe_set(settings, "database.url", "DATABASE_URL")
maybe_set(settings, "opensearch.url", "OPENSEARCH_URL")
maybe_set(settings, "sentry.dsn", "SENTRY_DSN")
maybe_set(settings, "sentry.transport", "SENTRY_TRANSPORT")
maybe_set_redis(settings, "sessions.url", "REDIS_URL", db=2)
maybe_set_redis(settings, "ratelimit.url", "REDIS_URL", db=3)
maybe_set_redis(settings, "db_results_cache.url", "REDIS_URL", db=5)
maybe_set(settings, "captcha.backend", "CAPTCHA_BACKEND")
maybe_set(settings, "recaptcha.site_key", "RECAPTCHA_SITE_KEY")
maybe_set(settings, "recaptcha.secret_key", "RECAPTCHA_SECRET_KEY")
maybe_set(settings, "hcaptcha.site_key", "HCAPTCHA_SITE_KEY")
maybe_set(settings, "hcaptcha.secret_key", "HCAPTCHA_SECRET_KEY")
maybe_set(settings, "sessions.secret", "SESSION_SECRET")
maybe_set(settings, "camo.url", "CAMO_URL")
maybe_set(settings, "camo.key", "CAMO_KEY")
maybe_set(settings, "docs.url", "DOCS_URL")
maybe_set(settings, "statuspage.url", "STATUSPAGE_URL")
maybe_set(settings, "hibp.api_key", "HIBP_API_KEY")
maybe_set(settings, "token.password.secret", "TOKEN_PASSWORD_SECRET")
maybe_set(settings, "token.email.secret", "TOKEN_EMAIL_SECRET")
maybe_set(settings, "token.two_factor.secret", "TOKEN_TWO_FACTOR_SECRET")
maybe_set(settings, "token.remember_device.secret", "TOKEN_REMEMBER_DEVICE_SECRET")
maybe_set(settings, "token.confirm_login.secret", "TOKEN_CONFIRM_LOGIN_SECRET")
maybe_set_redis(settings, "warehouse.xmlrpc.cache.url", "REDIS_URL", db=4)
maybe_set(
settings,
"warehouse.xmlrpc.client.ratelimit_string",
"XMLRPC_RATELIMIT_STRING",
default="3600 per hour",
)
maybe_set(settings, "token.password.max_age", "TOKEN_PASSWORD_MAX_AGE", coercer=int)
maybe_set(settings, "token.email.max_age", "TOKEN_EMAIL_MAX_AGE", coercer=int)
maybe_set(
settings,
"token.two_factor.max_age",
"TOKEN_TWO_FACTOR_MAX_AGE",
coercer=int,
default=300,
)
maybe_set(
settings,
"remember_device.days",
"REMEMBER_DEVICE_DAYS",
coercer=int,
default=30,
)
settings.setdefault(
"remember_device.seconds",
timedelta(days=settings.get("remember_device.days")).total_seconds(),
)
settings.setdefault(
"token.remember_device.max_age", settings.get("remember_device.seconds")
)
maybe_set(
settings,
"token.default.max_age",
"TOKEN_DEFAULT_MAX_AGE",
coercer=int,
default=21600, # 6 hours
)
maybe_set(
settings,
"reconcile_file_storages.batch_size",
"RECONCILE_FILE_STORAGES_BATCH_SIZE",
coercer=int,
default=100,
)
maybe_set_compound(settings, "billing", "backend", "BILLING_BACKEND")
maybe_set_compound(settings, "files", "backend", "FILES_BACKEND")
maybe_set_compound(settings, "archive_files", "backend", "ARCHIVE_FILES_BACKEND")
maybe_set_compound(settings, "simple", "backend", "SIMPLE_BACKEND")
maybe_set_compound(settings, "docs", "backend", "DOCS_BACKEND")
maybe_set_compound(settings, "sponsorlogos", "backend", "SPONSORLOGOS_BACKEND")
maybe_set_compound(settings, "origin_cache", "backend", "ORIGIN_CACHE")
maybe_set_compound(settings, "mail", "backend", "MAIL_BACKEND")
maybe_set_compound(settings, "metrics", "backend", "METRICS_BACKEND")
maybe_set_compound(settings, "breached_emails", "backend", "BREACHED_EMAILS")
maybe_set_compound(settings, "breached_passwords", "backend", "BREACHED_PASSWORDS")
maybe_set_compound(settings, "domain_status", "backend", "DOMAIN_STATUS_BACKEND")
maybe_set(
settings,
"oidc.backend",
"OIDC_BACKEND",
default="warehouse.oidc.services.OIDCPublisherService",
)
maybe_set(
settings,
"integrity.backend",
"INTEGRITY_BACKEND",
default="warehouse.attestations.services.IntegrityService",
)
# Pythondotorg integration settings
maybe_set(
settings,
"pythondotorg.host",
"PYTHONDOTORG_HOST",
default="https://www.python.org",
)
maybe_set(settings, "pythondotorg.api_token", "PYTHONDOTORG_API_TOKEN")
# Helpscout integration settings
maybe_set(
settings, "admin.helpscout.app_secret", "HELPSCOUT_APP_SECRET", default=None
)
maybe_set(settings, "helpdesk.backend", "HELPDESK_BACKEND")
maybe_set(settings, "helpscout.app_id", "HELPSCOUT_WAREHOUSE_APP_ID")
maybe_set(settings, "helpscout.app_secret", "HELPSCOUT_WAREHOUSE_APP_SECRET")
maybe_set(settings, "helpscout.mailbox_id", "HELPSCOUT_WAREHOUSE_MAILBOX_ID")
# Admin notification service settings
maybe_set(
settings, "helpdesk.notification_backend", "HELPDESK_NOTIFICATION_BACKEND"
)
maybe_set(
settings,
"helpdesk.notification_service_url",
"HELPDESK_NOTIFICATION_SERVICE_URL",
)
# Configure our ratelimiters
maybe_set(
settings,
"warehouse.account.user_login_ratelimit_string",
"USER_LOGIN_RATELIMIT_STRING",
default="10 per 5 minutes",
)
maybe_set(
settings,
"warehouse.account.ip_login_ratelimit_string",
"IP_LOGIN_RATELIMIT_STRING",
default="10 per 5 minutes",
)
maybe_set(
settings,
"warehouse.account.global_login_ratelimit_string",
"GLOBAL_LOGIN_RATELIMIT_STRING",
default="1000 per 5 minutes",
)
# Separate rate limiters for 2FA attempts to prevent brute-force attacks
maybe_set(
settings,
"warehouse.account.2fa_user_ratelimit_string",
"2FA_USER_RATELIMIT_STRING",
default="5 per 5 minutes, 20 per hour, 50 per day",
)
maybe_set(
settings,
"warehouse.account.2fa_ip_ratelimit_string",
"2FA_IP_RATELIMIT_STRING",
default="10 per 5 minutes, 50 per hour",
)
maybe_set(
settings,
"warehouse.account.email_add_ratelimit_string",
"EMAIL_ADD_RATELIMIT_STRING",
default="2 per day",
)
maybe_set(
settings,
"warehouse.account.verify_email_ratelimit_string",
"VERIFY_EMAIL_RATELIMIT_STRING",
default="3 per 6 hours",
)
maybe_set(
settings,
"warehouse.account.accounts_search_ratelimit_string",
"ACCOUNTS_SEARCH_RATELIMIT_STRING",
default="100 per hour",
)
maybe_set(
settings,
"warehouse.account.password_reset_ratelimit_string",
"PASSWORD_RESET_RATELIMIT_STRING",
default="5 per day",
)
maybe_set(
settings,
"warehouse.manage.oidc.user_registration_ratelimit_string",
"USER_OIDC_REGISTRATION_RATELIMIT_STRING",
default="100 per day",
)
maybe_set(
settings,
"warehouse.manage.oidc.ip_registration_ratelimit_string",
"IP_OIDC_REGISTRATION_RATELIMIT_STRING",
default="100 per day",
)
maybe_set(
settings,
"warehouse.packaging.project_create_user_ratelimit_string",
"PROJECT_CREATE_USER_RATELIMIT_STRING",
default="20 per hour",
)
maybe_set(
settings,
"warehouse.packaging.project_create_ip_ratelimit_string",
"PROJECT_CREATE_IP_RATELIMIT_STRING",
default="40 per hour",
)
maybe_set(
settings,
"warehouse.search.ratelimit_string",
"SEARCH_RATELIMIT_STRING",
default="5 per second",
)
# OIDC feature flags and settings
maybe_set(settings, "warehouse.oidc.audience", "OIDC_AUDIENCE")
maybe_set(
settings,
"warehouse.organizations.max_undecided_organization_applications",
"ORGANIZATION_MAX_UNDECIDED_APPLICATIONS",
coercer=int,
default=3,
)
# Add the settings we use when the environment is set to development.
if settings["warehouse.env"] == Environment.development:
settings.setdefault("enforce_https", False)
settings.setdefault("pyramid.reload_assets", True)
settings.setdefault("pyramid.reload_templates", True)
settings.setdefault("pyramid.prevent_http_cache", True)
settings.setdefault("debugtoolbar.hosts", ["0.0.0.0/0"])
settings.setdefault(
"debugtoolbar.panels",
[
".".join(["pyramid_debugtoolbar.panels", panel])
for panel in [
"versions.VersionDebugPanel",
"settings.SettingsDebugPanel",
"headers.HeaderDebugPanel",
"request_vars.RequestVarsDebugPanel",
"renderings.RenderingsDebugPanel",
"session.SessionDebugPanel",
"logger.LoggingPanel",
"performance.PerformanceDebugPanel",
"routes.RoutesDebugPanel",
"sqla.SQLADebugPanel",
"tweens.TweensDebugPanel",
"introspection.IntrospectionDebugPanel",
]
],
)
maybe_set(
settings,
"livereload.url",
"LIVERELOAD_URL",
default="http://localhost:35729",
)
# Actually setup our Pyramid Configurator with the values pulled in from
# the environment as well as the ones passed in to the configure function.
config = Configurator(settings=settings)
config.set_root_factory(RootFactory)
# Register support for services
config.include("pyramid_services")
# Register metrics
config.include(".metrics")
# Register our CSRF support. We do this here, immediately after we've
# created the Configurator instance so that we ensure to get our defaults
# set ASAP before anything else has a chance to set them and possibly call
# Configurator().commit()
config.include(".csrf")
# Include anything needed by the development environment.
if config.registry.settings["warehouse.env"] == Environment.development:
config.include("pyramid_debugtoolbar")
# Register our logging support
config.include(".logging")
# Register request utilities (nonce, etc.)
config.include(".request")
# We'll want to use Jinja2 as our template system.
config.include("pyramid_jinja2")
# Include our filters
config.include(".filters")
# Including pyramid_mailer for sending emails through SMTP.
config.include("pyramid_mailer")
# We want to use newstyle gettext
config.add_settings({"jinja2.newstyle": True})
# Our translation strings are all in the "messages" domain
config.add_settings({"jinja2.i18n.domain": "messages"})
# Trim the Jinja blocks from the output, it's extra whitespace.
config.add_settings({"jinja2.lstrip_blocks": True})
config.add_settings({"jinja2.trim_blocks": True})
# We also want to use Jinja2 for .html templates as well, because we just
# assume that all templates will be using Jinja.
config.add_jinja2_renderer(".html")
# Sometimes our files are .txt files and we still want to use Jinja2 to
# render them.
config.add_jinja2_renderer(".txt")
# Anytime we want to render a .xml template, we'll also use Jinja.
config.add_jinja2_renderer(".xml")
# We need to enable our Client Side Include extension
config.get_settings().setdefault(
"jinja2.extensions",
[
"warehouse.utils.html.ClientSideIncludeExtension",
"warehouse.i18n.extensions.TrimmedTranslatableTagsExtension",
],
)
# We'll want to configure some filters for Jinja2 as well.
filters = config.get_settings().setdefault("jinja2.filters", {})
filters.setdefault("format_classifiers", "warehouse.filters:format_classifiers")
filters.setdefault("classifier_id", "warehouse.filters:classifier_id")
filters.setdefault("format_tags", "warehouse.filters:format_tags")
filters.setdefault("json", "warehouse.filters:tojson")
filters.setdefault("camoify", "warehouse.filters:camoify")
filters.setdefault("shorten_number", "warehouse.filters:shorten_number")
filters.setdefault("urlparse", "warehouse.filters:urlparse")
filters.setdefault("contains_valid_uris", "warehouse.filters:contains_valid_uris")
filters.setdefault("format_package_type", "warehouse.filters:format_package_type")
filters.setdefault("parse_version", "warehouse.filters:parse_version")
filters.setdefault("localize_datetime", "warehouse.filters:localize_datetime")
filters.setdefault("ctime", "warehouse.filters:ctime")
filters.setdefault("is_recent", "warehouse.filters:is_recent")
filters.setdefault("canonicalize_name", "packaging.utils:canonicalize_name")
filters.setdefault("format_email", "warehouse.filters:format_email")
filters.setdefault(
"remove_invalid_xml_unicode", "warehouse.filters:remove_invalid_xml_unicode"
)
filters.setdefault("parse_isoformat", "warehouse.filters:parse_isoformat")
# We also want to register some global functions for Jinja
jglobals = config.get_settings().setdefault("jinja2.globals", {})
jglobals.setdefault("is_valid_uri", "warehouse.utils.http:is_valid_uri")
jglobals.setdefault("gravatar", "warehouse.utils.gravatar:gravatar")
jglobals.setdefault("gravatar_profile", "warehouse.utils.gravatar:profile")
jglobals.setdefault("now", "warehouse.utils:now")
# And some enums to reuse in the templates
jglobals.setdefault("AdminFlagValue", "warehouse.admin.flags:AdminFlagValue")
jglobals.setdefault("EventTag", "warehouse.events.tags:EventTag")
jglobals.setdefault("Permissions", "warehouse.authnz:Permissions")
jglobals.setdefault(
"OrganizationInvitationStatus",
"warehouse.organizations.models:OrganizationInvitationStatus",
)
jglobals.setdefault(
"OrganizationRoleType", "warehouse.organizations.models:OrganizationRoleType"
)
jglobals.setdefault(
"OrganizationType", "warehouse.organizations.models:OrganizationType"
)
jglobals.setdefault(
"RoleInvitationStatus", "warehouse.packaging.models:RoleInvitationStatus"
)
jglobals.setdefault(
"TeamProjectRoleType", "warehouse.organizations.models:TeamProjectRoleType"
)
# We'll store all of our templates in one location, warehouse/templates
# so we'll go ahead and add that to the Jinja2 search path.
config.add_jinja2_search_path("warehouse:templates", name=".html")
config.add_jinja2_search_path("warehouse:templates", name=".txt")
config.add_jinja2_search_path("warehouse:templates", name=".xml")
# We want to configure our JSON renderer to sort the keys, and also to use
# an ultra compact serialization format.
config.add_renderer(
"json",
renderers.JSON(
serializer=orjson.dumps,
option=orjson.OPT_SORT_KEYS | orjson.OPT_APPEND_NEWLINE,
),
)
# Configure retry support.
config.add_settings({"retry.attempts": 3})
config.include("pyramid_retry")
# Configure our transaction handling so that each request gets its own
# transaction handler and the lifetime of the transaction is tied to the
# lifetime of the request.
config.add_settings(
{
"tm.manager_hook": lambda request: transaction.TransactionManager(),
"tm.activate_hook": activate_hook,
"tm.annotate_user": False,
}
)
config.include("pyramid_tm")
# Register our XMLRPC service
config.include(".legacy.api.xmlrpc")
# Register our XMLRPC cache
config.include(".legacy.api.xmlrpc.cache")
# Register support for XMLRPC and override it's renderer to allow
# specifying custom dumps arguments.
config.include("pyramid_rpc.xmlrpc")
config.add_renderer("xmlrpc", XMLRPCRenderer(allow_none=True))
# Register support for our legacy action URLs
config.include(".legacy.action_routing")
# Register support for our custom predicates
config.include(".predicates")
# Register support for template views.
config.add_directive("add_template_view", template_view, action_wrap=False)
# Register support for internationalization and localization
config.include(".i18n")
# Register the configuration for the PostgreSQL database.
config.include(".db")
# Register the support for Celery Tasks
config.include(".tasks")
# Register support for our rate limiting mechanisms
config.include(".rate_limiting")
config.include(".static")
config.include(".search")
# Register the support for AWS, Backblaze,and Google Cloud
config.include(".aws")
config.include(".b2")
config.include(".gcloud")
# Register our session support
config.include(".sessions")
# Register our support for http and origin caching
config.include(".cache.http")
config.include(".cache.origin")
# Register our support for the database results cache
config.include(".cache")
# Register support for sending emails
config.include(".email")
# Register our authentication support.
config.include(".accounts")
# Register support for Macaroon based authentication
config.include(".macaroons")
# Register support for OIDC based authentication
config.include(".oidc")
# Register support for attestations
config.include(".attestations")
# Register logged-in views
config.include(".manage")
# Register our organization support.
config.include(".organizations")
# Register our subscription support.
config.include(".subscriptions")
# Allow the packaging app to register any services it has.
config.include(".packaging")
# Configure redirection support
config.include(".redirects") # internal
config.include("pyramid_redirect") # external
config.add_settings({"pyramid_redirect.structlog": True})
# Register all our URL routes for Warehouse.
config.include(".routes")
# Allow the sponsors app to list sponsors
config.include(".sponsors")
# Allow the banners app to list banners
config.include(".banners")
# Include our admin application
config.include(".admin")
# Register forklift, at least until we split it out into it's own project.
config.include(".forklift")
# Block non HTTPS requests for the legacy ?:action= routes when they are
# sent via POST.
config.add_tween("warehouse.config.require_https_tween_factory")
# Enable compression of our HTTP responses
config.add_tween(
"warehouse.utils.compression.compression_tween_factory",
over=[
"warehouse.cache.http.conditional_http_tween_factory",
"pyramid_debugtoolbar.toolbar_tween_factory",
EXCVIEW,
],
)
# Reject requests with duplicate POST keys
config.add_view_deriver(
reject_duplicate_post_keys_view, over="rendered_view", under="decorated_view"
)
# Enable Warehouse to serve our static files
prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
config.add_static_view(
"static",
"warehouse:static/dist/",
# Don't cache at all if prevent_http_cache is true, else we'll cache
# the files for 10 years.
cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.add_cache_buster(
"warehouse:static/dist/",
ManifestCacheBuster(
"warehouse:static/dist/manifest.json",
reload=config.registry.settings["pyramid.reload_assets"],
strict=not prevent_http_cache,
),
)
config.whitenoise_serve_static(
autorefresh=prevent_http_cache,
max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.whitenoise_add_files("warehouse:static/dist/", prefix="/static/")
config.whitenoise_add_manifest(
"warehouse:static/dist/manifest.json", prefix="/static/"
)
# Set up API configuration
config.include(".api.config")
# Enable support of passing certain values like remote host, client
# address, and protocol support in from an outer proxy to the application.
config.add_wsgi_middleware(
ProxyFixer,
token=config.registry.settings["warehouse.token"],
ip_salt=config.registry.settings["warehouse.ip_salt"],
num_proxies=config.registry.settings.get("warehouse.num_proxies", 1),
)
# Protect against cache poisoning via the X-Vhm-Root headers.
config.add_wsgi_middleware(VhmRootRemover)
# Add our extensions to Request
config.include(".utils.wsgi")
# We want Sentry to be the last things we add here so that it's the outer
# most WSGI middleware.
config.include(".sentry")
# Register Content-Security-Policy service
config.include(".csp")
# Register Referrer-Policy service
config.include(".referrer_policy")
# Register Captcha service
config.include(".captcha")
# Register HelpDesk service
config.include(".helpdesk")
config.add_settings({"http": {"verify": "/etc/ssl/certs/"}})
config.include(".http")
# Register our row counting maintenance
config.include(".utils.row_counter")
# Scan everything for configuration
config.scan(
categories=(
"pyramid",
"warehouse",
),
ignore=["warehouse.migrations.env", "warehouse.celery", "warehouse.wsgi"],
)
# Sanity check our request and responses.
# Note: It is very important that this go last. We need everything else
# that might have added a tween to be registered prior to this.
config.include(".sanity")
# Finally, commit all of our changes
config.commit()
return config
|
RootFactory
|
python
|
doocs__leetcode
|
solution/3600-3699/3688.Bitwise OR of Even Numbers in an Array/Solution.py
|
{
"start": 0,
"end": 139
}
|
class ____:
def evenNumberBitwiseORs(self, nums: List[int]) -> int:
return reduce(or_, (x for x in nums if x % 2 == 0), 0)
|
Solution
|
python
|
Pylons__pyramid
|
tests/test_session.py
|
{
"start": 19396,
"end": 19868
}
|
class ____(unittest.TestCase):
    """Exercise the ``manage_changed`` decorator on a dummy session."""

    def _makeOne(self, wrapped):
        # Import at call time so the object under test is resolved lazily,
        # keeping module import of this test file independent of pyramid.
        from pyramid.session import manage_changed
        return manage_changed(wrapped)

    def test_it(self):
        req = testing.DummyRequest()
        sess = DummySessionFactory(req)
        setter = self._makeOne(sess.__class__.__setitem__)
        setter(sess, 'a', 1)
        # A write through the wrapper must stamp the access time and
        # flag the session as dirty.
        self.assertNotEqual(sess.accessed, None)
        self.assertTrue(sess._dirty)
|
Test_manage_changed
|
python
|
pydantic__pydantic
|
pydantic/types.py
|
{
"start": 36376,
"end": 36673
}
|
class ____(BaseModel):
uuid6: UUID6
Model(uuid6=uuid.UUID('1efea953-c2d6-6790-aa0a-69db8c87df97'))
```
"""
UUID7 = Annotated[UUID, UuidVersion(7)]
"""A [UUID](https://docs.python.org/3/library/uuid.html) that must be version 7.
```python
import uuid
from pydantic import UUID7, BaseModel
|
Model
|
python
|
keon__algorithms
|
tests/test_maths.py
|
{
"start": 2927,
"end": 3242
}
|
class ____(unittest.TestCase):
    """Tests for extended_gcd.py.

    ``extended_gcd(a, b)`` is expected to return a triple ``(s, t, g)``
    of Bezout coefficients and divisor with ``s*a + t*b == g``.
    """

    def test_extended_gcd(self):
        # 0*8 + 1*2 == 2 == gcd(8, 2)
        self.assertEqual((0, 1, 2), extended_gcd(8, 2))
        # NOTE(review): gcd(13, 17) is 1, yet this expects 17 in the last
        # slot — presumably matching a quirk of the implementation under
        # test; confirm against extended_gcd.py before changing.
        self.assertEqual((0, 1, 17), extended_gcd(13, 17))
|
TestExtendedGcd
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.