language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/namedTuple8.py | {
"start": 116,
"end": 183
} | class ____(NamedTuple, Generic[AnyStr]):
scheme: AnyStr
| GenericNT |
python | pytorch__pytorch | test/distributed/checkpoint/_experimental/test_checkpointer.py | {
"start": 1560,
"end": 16273
} | class ____(TestCase):
"""Parameterized tests that work with both sync and async checkpointers."""
def setUp(self):
super().setUp()
# Create a temporary directory for checkpoints
self.temp_dir = tempfile.mkdtemp()
# Create real objects for testing
self.rank_info = RankInfo(
global_world_size=1,
global_rank=0,
)
self.writer_config = CheckpointWriterConfig()
# Create reader for testing
self.reader = CheckpointReader(
rank_info=self.rank_info,
)
# Create test state dictionary
self.state_dict = {
"model": torch.nn.Linear(10, 5).state_dict(),
"optimizer": {"param_groups": [{"lr": 0.01}]},
"epoch": 5,
"step": 1000,
}
def tearDown(self):
# Clean up the temporary directory
shutil.rmtree(self.temp_dir)
def _create_sync_checkpointer(self) -> SyncCheckpointer:
"""Create a synchronous checkpointer."""
writer = CheckpointWriter(
config=self.writer_config,
rank_info=self.rank_info,
)
return SyncCheckpointer(writer, self.reader)
def _create_async_checkpointer(self) -> AsyncCheckpointer:
"""Create an asynchronous checkpointer."""
# Create staging config for async operations
# Use conservative settings to avoid CUDA issues in test environment
stager_config = CheckpointStagerConfig(
use_async_staging=True,
use_pinned_memory=False, # Disable to avoid CUDA memory issues
use_shared_memory=True,
use_non_blocking_copy=False, # Disable to avoid CUDA issues
)
# Create process config
process_config = CheckpointProcessConfig(
subprocess_init_timeout_secs=30,
subprocess_shutdown_timeout_secs=60,
)
# Create stager
checkpoint_stager = DefaultStager(stager_config)
# Create checkpoint process
checkpoint_process = CheckpointProcess(
rank_info=self.rank_info,
config=process_config,
subprocess_init_fn=subprocess_init_fn,
subprocess_init_args=(
"test-async-checkpointer",
os.getpid(),
),
checkpoint_writer_init_fn=ckpt_writer_init_fn,
checkpoint_writer_init_args={
"config": self.writer_config,
"rank_info": self.rank_info,
},
)
# Wait for process initialization
checkpoint_process.process_creation_future.result()
return AsyncCheckpointer(
checkpoint_stager=checkpoint_stager,
checkpoint_process=checkpoint_process,
reader=self.reader,
)
def _get_checkpointers(self):
"""Get both sync and async checkpointers for parameterized testing."""
return [
("sync", self._create_sync_checkpointer()),
("async", self._create_async_checkpointer()),
]
def _save_checkpoint(self, checkpointer: Checkpointer, path, state_dict, **kwargs):
"""Save checkpoint and handle both sync/async return values."""
result = checkpointer.save(path, state_dict, **kwargs)
return (None, None) if result is None else result
def _wait_for_save(self, stage_future, write_future):
"""Wait for save operation to complete."""
if write_future is not None:
write_future.result()
if stage_future is not None:
stage_future.result()
def test_save_and_load_basic(self):
"""Test basic save and load functionality for both sync and async."""
for checkpointer_type, checkpointer in self._get_checkpointers():
with self.subTest(checkpointer_type=checkpointer_type):
try:
checkpoint_path = os.path.join(
self.temp_dir, f"checkpoint_{checkpointer_type}"
)
# Save the checkpoint
stage_future, write_future = self._save_checkpoint(
checkpointer, checkpoint_path, self.state_dict
)
self._wait_for_save(stage_future, write_future)
# Verify that the checkpoint file exists
checkpoint_file = os.path.join(
checkpoint_path, f"checkpoint_{self.rank_info.global_rank}.pt"
)
self.assertTrue(os.path.exists(checkpoint_file))
# Load the checkpoint using the checkpointer
loaded_state_dict = checkpointer.load(checkpoint_path)
# Verify the loaded state dictionary
self.assertIn("model", loaded_state_dict)
self.assertIn("optimizer", loaded_state_dict)
self.assertEqual(loaded_state_dict["epoch"], 5)
self.assertEqual(loaded_state_dict["step"], 1000)
finally:
checkpointer.close()
def test_load_with_map_location(self):
"""Test loading with map_location for both sync and async."""
for checkpointer_type, checkpointer in self._get_checkpointers():
with self.subTest(checkpointer_type=checkpointer_type):
try:
checkpoint_path = os.path.join(
self.temp_dir, f"checkpoint_map_{checkpointer_type}"
)
# Save the checkpoint
stage_future, write_future = self._save_checkpoint(
checkpointer, checkpoint_path, self.state_dict
)
self._wait_for_save(stage_future, write_future)
# Load with map_location
loaded_state_dict = checkpointer.load(
checkpoint_path, default_map_location="cpu"
)
# Verify the loaded state dictionary
self.assertIn("model", loaded_state_dict)
self.assertEqual(loaded_state_dict["epoch"], 5)
finally:
checkpointer.close()
def test_partial_load(self):
"""Test partial loading for both sync and async."""
for checkpointer_type, checkpointer in self._get_checkpointers():
with self.subTest(checkpointer_type=checkpointer_type):
try:
checkpoint_path = os.path.join(
self.temp_dir, f"checkpoint_partial_{checkpointer_type}"
)
# Save the full checkpoint
stage_future, write_future = self._save_checkpoint(
checkpointer, checkpoint_path, self.state_dict
)
self._wait_for_save(stage_future, write_future)
# Create a partial state dictionary
partial_state_dict = {
"model": torch.nn.Linear(10, 5).state_dict(),
"epoch": None,
}
# Load only the keys in partial_state_dict
loaded_state_dict = checkpointer.load(
checkpoint_path, state_dict=partial_state_dict
)
# Verify partial loading worked
self.assertIn("model", loaded_state_dict)
self.assertIn("epoch", loaded_state_dict)
self.assertEqual(loaded_state_dict["epoch"], 5)
self.assertNotIn("step", loaded_state_dict)
self.assertNotIn("optimizer", loaded_state_dict)
finally:
checkpointer.close()
def test_load_strict_mode(self):
"""Test strict mode loading for both sync and async."""
for checkpointer_type, checkpointer in self._get_checkpointers():
with self.subTest(checkpointer_type=checkpointer_type):
try:
checkpoint_path = os.path.join(
self.temp_dir, f"checkpoint_strict_{checkpointer_type}"
)
# Save a checkpoint with limited keys
limited_state_dict = {"model": torch.nn.Linear(10, 5).state_dict()}
stage_future, write_future = self._save_checkpoint(
checkpointer, checkpoint_path, limited_state_dict
)
self._wait_for_save(stage_future, write_future)
# Try to load with more keys than exist in checkpoint
partial_state_dict = {
"model": torch.nn.Linear(10, 5).state_dict(),
"missing_key": None,
}
# Should raise error in strict mode
with self.assertRaises(RuntimeError) as cm:
checkpointer.load(
checkpoint_path, state_dict=partial_state_dict, strict=True
)
self.assertIn("missing keys", str(cm.exception))
# Should work without strict mode
loaded_state_dict = checkpointer.load(
checkpoint_path, state_dict=partial_state_dict, strict=False
)
self.assertIn("model", loaded_state_dict)
finally:
checkpointer.close()
def test_save_with_kwargs(self):
"""Test save with additional kwargs for both sync and async."""
for checkpointer_type, checkpointer in self._get_checkpointers():
with self.subTest(checkpointer_type=checkpointer_type):
try:
checkpoint_path = os.path.join(
self.temp_dir, f"checkpoint_kwargs_{checkpointer_type}"
)
# For sync checkpointer, we can pass arbitrary kwargs to the writer
# For async checkpointer, we test without kwargs to avoid conflicts
if checkpointer_type == "sync":
# Sync checkpointer passes kwargs directly to writer, so arbitrary kwargs are OK
stage_future, write_future = self._save_checkpoint(
checkpointer,
checkpoint_path,
self.state_dict,
custom_arg="test_value",
another_arg=42,
)
else:
# Async checkpointer has complex kwargs handling between stager and writer
# Just test basic save without kwargs to avoid conflicts
stage_future, write_future = self._save_checkpoint(
checkpointer,
checkpoint_path,
self.state_dict,
)
self._wait_for_save(stage_future, write_future)
# Verify checkpoint was created
checkpoint_file = os.path.join(
checkpoint_path, f"checkpoint_{self.rank_info.global_rank}.pt"
)
self.assertTrue(os.path.exists(checkpoint_file))
finally:
checkpointer.close()
def test_nested_dict_partial_load(self):
"""Test loading nested dictionaries partially for both sync and async."""
for checkpointer_type, checkpointer in self._get_checkpointers():
with self.subTest(checkpointer_type=checkpointer_type):
try:
# Create a checkpoint with nested dictionaries
nested_state_dict = {
"model": {
"layer1": {
"weight": torch.randn(5, 10),
"bias": torch.randn(5),
},
"layer2": {
"weight": torch.randn(2, 5),
"bias": torch.randn(2),
},
},
"metadata": {"epoch": 10, "step": 2000},
}
checkpoint_path = os.path.join(
self.temp_dir, f"checkpoint_nested_{checkpointer_type}"
)
# Save the nested state dict
stage_future, write_future = self._save_checkpoint(
checkpointer, checkpoint_path, nested_state_dict
)
self._wait_for_save(stage_future, write_future)
# Create a partial state dictionary with nested structure
partial_state_dict = {
"model": {
"layer1": {"weight": None}, # Only request layer1.weight
},
"metadata": {"epoch": None}, # Only request metadata.epoch
}
# Load only the keys in partial_state_dict
loaded_state_dict = checkpointer.load(
checkpoint_path, state_dict=partial_state_dict
)
# Verify that the nested keys were correctly loaded
self.assertIn("model", loaded_state_dict)
self.assertIn("layer1", loaded_state_dict["model"])
self.assertIn("weight", loaded_state_dict["model"]["layer1"])
self.assertIn("metadata", loaded_state_dict)
self.assertIn("epoch", loaded_state_dict["metadata"])
# Verify values were loaded correctly
self.assertTrue(
torch.allclose(
loaded_state_dict["model"]["layer1"]["weight"],
nested_state_dict["model"]["layer1"]["weight"],
)
)
self.assertEqual(loaded_state_dict["metadata"]["epoch"], 10)
# Verify that keys not in the partial_state_dict are not loaded
self.assertNotIn("layer2", loaded_state_dict["model"])
self.assertNotIn("step", loaded_state_dict["metadata"])
finally:
checkpointer.close()
| TestCheckpointer |
python | astropy__astropy | astropy/cosmology/_src/tests/io/base.py | {
"start": 2913,
"end": 4604
} | class ____(IOTestBase):
"""Directly test Cosmology I/O functions.
These functions are not public API and are discouraged from public use, in
favor of the I/O methods on |Cosmology|. They are tested b/c they are used
internally and because some tests for the methods on |Cosmology| don't need
to be run in the |Cosmology| class's large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
"""
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""Setup and teardown for tests."""
@dataclass_decorator
class CosmologyWithKwargs(Cosmology):
Tcmb0: Parameter = Parameter(default=0, unit=u.K)
def __init__(
self, Tcmb0=0, name="cosmology with kwargs", meta=None, **kwargs
):
super().__init__(name=name, meta=meta)
self.__dict__["Tcmb0"] = Tcmb0 << u.K
yield # run tests
# pop CosmologyWithKwargs from registered classes
# but don't error b/c it can fail in parallel
_COSMOLOGY_CLASSES.pop(CosmologyWithKwargs.__qualname__, None)
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
"""Cosmology instance."""
if isinstance(request.param, str): # CosmologyWithKwargs
return _COSMOLOGY_CLASSES[request.param](Tcmb0=3)
return request.param
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
"""Cosmology classes."""
return cosmo.__class__
| IODirectTestBase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 27648,
"end": 27777
} | class ____(sgqlc.types.Scalar):
"""A string containing HTML code."""
__schema__ = github_schema
ID = sgqlc.types.ID
| HTML |
python | python-pillow__Pillow | Tests/test_image_resample.py | {
"start": 1429,
"end": 8436
} | class ____:
def make_case(self, mode: str, size: tuple[int, int], color: int) -> Image.Image:
"""Makes a sample image with two dark and two bright squares.
For example:
e0 e0 1f 1f
e0 e0 1f 1f
1f 1f e0 e0
1f 1f e0 e0
"""
case = Image.new("L", size, 255 - color)
rectangle = ImageDraw.Draw(case).rectangle
rectangle((0, 0, size[0] // 2 - 1, size[1] // 2 - 1), color)
rectangle((size[0] // 2, size[1] // 2, size[0], size[1]), color)
return Image.merge(mode, [case] * len(mode))
def make_sample(self, data: str, size: tuple[int, int]) -> Image.Image:
"""Restores a sample image from given data string which contains
hex-encoded pixels from the top left fourth of a sample.
"""
data = data.replace(" ", "")
sample = Image.new("L", size)
s_px = sample.load()
assert s_px is not None
w, h = size[0] // 2, size[1] // 2
for y in range(h):
for x in range(w):
val = int(data[(y * w + x) * 2 : (y * w + x + 1) * 2], 16)
s_px[x, y] = val
s_px[size[0] - x - 1, size[1] - y - 1] = val
s_px[x, size[1] - y - 1] = 255 - val
s_px[size[0] - x - 1, y] = 255 - val
return sample
def check_case(self, case: Image.Image, sample: Image.Image) -> None:
s_px = sample.load()
c_px = case.load()
assert s_px is not None
assert c_px is not None
for y in range(case.size[1]):
for x in range(case.size[0]):
if c_px[x, y] != s_px[x, y]:
message = (
f"\nHave: \n{self.serialize_image(case)}\n"
f"\nExpected: \n{self.serialize_image(sample)}"
)
assert s_px[x, y] == c_px[x, y], message
def serialize_image(self, image: Image.Image) -> str:
s_px = image.load()
assert s_px is not None
return "\n".join(
" ".join(f"{s_px[x, y]:02x}" for x in range(image.size[0]))
for y in range(image.size[1])
)
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_box(self, mode: str) -> None:
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.BOX)
# fmt: off
data = ("e1 e1"
"e1 e1")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_bilinear(self, mode: str) -> None:
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.BILINEAR)
# fmt: off
data = ("e1 c9"
"c9 b7")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_hamming(self, mode: str) -> None:
case = self.make_case(mode, (8, 8), 0xE1)
case = case.resize((4, 4), Image.Resampling.HAMMING)
# fmt: off
data = ("e1 da"
"da d3")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_bicubic(self, mode: str) -> None:
case = self.make_case(mode, (12, 12), 0xE1)
case = case.resize((6, 6), Image.Resampling.BICUBIC)
# fmt: off
data = ("e1 e3 d4"
"e3 e5 d6"
"d4 d6 c9")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (6, 6)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_reduce_lanczos(self, mode: str) -> None:
case = self.make_case(mode, (16, 16), 0xE1)
case = case.resize((8, 8), Image.Resampling.LANCZOS)
# fmt: off
data = ("e1 e0 e4 d7"
"e0 df e3 d6"
"e4 e3 e7 da"
"d7 d6 d9 ce")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (8, 8)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_box(self, mode: str) -> None:
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.BOX)
# fmt: off
data = ("e1 e1"
"e1 e1")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_bilinear(self, mode: str) -> None:
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.BILINEAR)
# fmt: off
data = ("e1 b0"
"b0 98")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_hamming(self, mode: str) -> None:
case = self.make_case(mode, (2, 2), 0xE1)
case = case.resize((4, 4), Image.Resampling.HAMMING)
# fmt: off
data = ("e1 d2"
"d2 c5")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (4, 4)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_bicubic(self, mode: str) -> None:
case = self.make_case(mode, (4, 4), 0xE1)
case = case.resize((8, 8), Image.Resampling.BICUBIC)
# fmt: off
data = ("e1 e5 ee b9"
"e5 e9 f3 bc"
"ee f3 fd c1"
"b9 bc c1 a2")
# fmt: on
for channel in case.split():
self.check_case(channel, self.make_sample(data, (8, 8)))
@pytest.mark.parametrize("mode", ("RGBX", "RGB", "La", "L"))
def test_enlarge_lanczos(self, mode: str) -> None:
case = self.make_case(mode, (6, 6), 0xE1)
case = case.resize((12, 12), Image.Resampling.LANCZOS)
data = (
"e1 e0 db ed f5 b8"
"e0 df da ec f3 b7"
"db db d6 e7 ee b5"
"ed ec e6 fb ff bf"
"f5 f4 ee ff ff c4"
"b8 b7 b4 bf c4 a0"
)
for channel in case.split():
self.check_case(channel, self.make_sample(data, (12, 12)))
def test_box_filter_correct_range(self) -> None:
im = Image.new("RGB", (8, 8), "#1688ff").resize(
(100, 100), Image.Resampling.BOX
)
ref = Image.new("RGB", (100, 100), "#1688ff")
assert_image_equal(im, ref)
| TestImagingCoreResampleAccuracy |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_system_message.py | {
"start": 27614,
"end": 31828
} | class ____:
"""Test middleware that accepts SystemMessage return types."""
def test_middleware_can_return_system_message(self) -> None:
"""Test that middleware can return a SystemMessage with dynamic content."""
def dynamic_system_prompt_middleware(request: ModelRequest) -> SystemMessage:
"""Return a SystemMessage with dynamic content."""
region = getattr(request.runtime.context, "region", "n/a")
return SystemMessage(content=f"You are a helpful assistant. Region: {region}")
runtime = _fake_runtime(context={"region": "EU"})
request = ModelRequest(
model=GenericFakeChatModel(messages=iter([AIMessage(content="response")])),
system_message=None,
messages=[HumanMessage(content="Hello")],
tool_choice=None,
tools=[],
response_format=None,
state=cast("AgentState", {"messages": []}), # type: ignore[name-defined]
runtime=runtime,
model_settings={},
)
new_system_message = dynamic_system_prompt_middleware(request)
assert isinstance(new_system_message, SystemMessage)
assert len(new_system_message.content_blocks) == 1
assert (
new_system_message.content_blocks[0]["text"]
== "You are a helpful assistant. Region: EU"
)
def test_middleware_can_use_system_message_with_metadata(self) -> None:
"""Test middleware creating SystemMessage with additional metadata."""
def metadata_middleware(request: ModelRequest) -> SystemMessage:
"""Return SystemMessage with metadata."""
return SystemMessage(
content="You are a helpful assistant",
additional_kwargs={"temperature": 0.7, "model": "gpt-4"},
response_metadata={"region": "us-east"},
)
request = _make_request()
new_system_message = metadata_middleware(request)
assert len(new_system_message.content_blocks) == 1
assert new_system_message.content_blocks[0]["text"] == "You are a helpful assistant"
assert new_system_message.additional_kwargs == {
"temperature": 0.7,
"model": "gpt-4",
}
assert new_system_message.response_metadata == {"region": "us-east"}
def test_middleware_handles_none_system_message(self) -> None:
"""Test middleware creating new SystemMessage when none exists."""
def create_if_none_middleware(request: ModelRequest) -> SystemMessage:
"""Create a system message if none exists."""
if request.system_message is None:
return SystemMessage(content="Default system prompt")
return request.system_message
request = _make_request(system_message=None)
new_system_message = create_if_none_middleware(request)
assert isinstance(new_system_message, SystemMessage)
assert len(new_system_message.content_blocks) == 1
assert new_system_message.content_blocks[0]["text"] == "Default system prompt"
def test_middleware_with_content_blocks(self) -> None:
"""Test middleware creating SystemMessage with content blocks."""
def content_blocks_middleware(request: ModelRequest) -> SystemMessage:
"""Create SystemMessage with content blocks including cache control."""
return SystemMessage(
content=[
{"type": "text", "text": "Base instructions"},
{
"type": "text",
"text": "Cached instructions",
"cache_control": {"type": "ephemeral"},
},
]
)
request = _make_request()
new_system_message = content_blocks_middleware(request)
assert isinstance(new_system_message.content_blocks, list)
assert len(new_system_message.content_blocks) == 2
assert new_system_message.content_blocks[0]["text"] == "Base instructions"
assert new_system_message.content_blocks[1]["cache_control"] == {"type": "ephemeral"}
| TestDynamicSystemPromptMiddleware |
python | scipy__scipy | scipy/io/arff/_arffread.py | {
"start": 6807,
"end": 9703
} | class ____(Attribute):
def __init__(self, name, date_format, datetime_unit):
super().__init__(name)
self.date_format = date_format
self.datetime_unit = datetime_unit
self.type_name = 'date'
self.range = date_format
self.dtype = np.datetime64(0, self.datetime_unit)
@staticmethod
def _get_date_format(atrv):
m = r_date.match(atrv)
if m:
pattern = m.group(1).strip()
# convert time pattern from Java's SimpleDateFormat to C's format
datetime_unit = None
if "yyyy" in pattern:
pattern = pattern.replace("yyyy", "%Y")
datetime_unit = "Y"
elif "yy":
pattern = pattern.replace("yy", "%y")
datetime_unit = "Y"
if "MM" in pattern:
pattern = pattern.replace("MM", "%m")
datetime_unit = "M"
if "dd" in pattern:
pattern = pattern.replace("dd", "%d")
datetime_unit = "D"
if "HH" in pattern:
pattern = pattern.replace("HH", "%H")
datetime_unit = "h"
if "mm" in pattern:
pattern = pattern.replace("mm", "%M")
datetime_unit = "m"
if "ss" in pattern:
pattern = pattern.replace("ss", "%S")
datetime_unit = "s"
if "z" in pattern or "Z" in pattern:
raise ValueError("Date type attributes with time zone not "
"supported, yet")
if datetime_unit is None:
raise ValueError("Invalid or unsupported date format")
return pattern, datetime_unit
else:
raise ValueError("Invalid or no date format")
@classmethod
def parse_attribute(cls, name, attr_string):
"""
Parse the attribute line if it knows how. Returns the parsed
attribute, or None.
For date attributes, the attribute string would be like
'date <format>'.
"""
attr_string_lower = attr_string.lower().strip()
if attr_string_lower[:len('date')] == 'date':
date_format, datetime_unit = cls._get_date_format(attr_string)
return cls(name, date_format, datetime_unit)
else:
return None
def parse_data(self, data_str):
"""
Parse a value of this type.
"""
date_str = data_str.strip().strip("'").strip('"')
if date_str == '?':
return np.datetime64('NaT', self.datetime_unit)
else:
dt = datetime.datetime.strptime(date_str, self.date_format)
return np.datetime64(dt).astype(
f"datetime64[{self.datetime_unit}]")
def __str__(self):
return super().__str__() + ',' + self.date_format
| DateAttribute |
python | dask__distributed | distributed/broker.py | {
"start": 905,
"end": 3140
} | class ____:
_scheduler: Scheduler
_topics: defaultdict[str, Topic]
def __init__(self, maxlen: int, scheduler: Scheduler) -> None:
self._scheduler = scheduler
self._topics = defaultdict(partial(Topic, maxlen=maxlen))
def subscribe(self, topic: str, subscriber: str) -> None:
self._topics[topic].subscribe(subscriber)
def unsubscribe(self, topic: str, subscriber: str) -> None:
self._topics[topic].unsubscribe(subscriber)
def publish(self, topics: str | Collection[str], msg: Any) -> None:
event = (time(), msg)
if isinstance(topics, str):
topics = [topics]
for name in topics:
topic = self._topics[name]
topic.publish(event)
self._send_to_subscribers(name, event)
for plugin in list(self._scheduler.plugins.values()):
try:
plugin.log_event(name, msg)
except Exception:
logger.info("Plugin failed with exception", exc_info=True)
def truncate(self, topic: str | None = None) -> None:
if topic is None:
for _topic in self._topics.values():
_topic.truncate()
elif topic in self._topics:
self._topics[topic].truncate()
def _send_to_subscribers(self, topic: str, event: Any) -> None:
msg = {
"op": "event",
"topic": topic,
"event": event,
}
client_msgs = {client: [msg] for client in self._topics[topic].subscribers}
self._scheduler.send_all(client_msgs, worker_msgs={})
@overload
def get_events(self, topic: str) -> tuple[tuple[float, Any], ...]: ...
@overload
def get_events(
self, topic: None = None
) -> dict[str, tuple[tuple[float, Any], ...]]: ...
def get_events(
self, topic: str | None = None
) -> tuple[tuple[float, Any], ...] | dict[str, tuple[tuple[float, Any], ...]]:
if topic is not None:
return tuple(self._topics[topic].events)
else:
return {
name: tuple(topic.events)
for name, topic in self._topics.items()
if topic.events
}
| Broker |
python | google__flatbuffers | python/flatbuffers/reflection/Schema.py | {
"start": 179,
"end": 7978
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Schema()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSchema(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def SchemaBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x42\x46\x42\x53", size_prefixed=size_prefixed)
# Schema
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Schema
def Objects(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.Object import Object
obj = Object()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Schema
def ObjectsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Schema
def ObjectsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# Schema
def Enums(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.Enum import Enum
obj = Enum()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Schema
def EnumsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Schema
def EnumsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# Schema
def FileIdent(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Schema
def FileExt(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Schema
def RootTable(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from reflection.Object import Object
obj = Object()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Schema
def Services(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.Service import Service
obj = Service()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Schema
def ServicesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Schema
def ServicesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
# Schema
def AdvancedFeatures(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# All the files used in this compilation. Files are relative to where
# flatc was invoked.
# Schema
def FbsFiles(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.SchemaFile import SchemaFile
obj = SchemaFile()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Schema
def FbsFilesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Schema
def FbsFilesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
return o == 0
def SchemaStart(builder):
builder.StartObject(8)
def Start(builder):
SchemaStart(builder)
def SchemaAddObjects(builder, objects):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(objects), 0)
def AddObjects(builder, objects):
SchemaAddObjects(builder, objects)
def SchemaStartObjectsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartObjectsVector(builder, numElems):
return SchemaStartObjectsVector(builder, numElems)
def SchemaAddEnums(builder, enums):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(enums), 0)
def AddEnums(builder, enums):
SchemaAddEnums(builder, enums)
def SchemaStartEnumsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartEnumsVector(builder, numElems):
return SchemaStartEnumsVector(builder, numElems)
def SchemaAddFileIdent(builder, fileIdent):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(fileIdent), 0)
def AddFileIdent(builder, fileIdent):
SchemaAddFileIdent(builder, fileIdent)
def SchemaAddFileExt(builder, fileExt):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(fileExt), 0)
def AddFileExt(builder, fileExt):
SchemaAddFileExt(builder, fileExt)
def SchemaAddRootTable(builder, rootTable):
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(rootTable), 0)
def AddRootTable(builder, rootTable):
SchemaAddRootTable(builder, rootTable)
def SchemaAddServices(builder, services):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(services), 0)
def AddServices(builder, services):
SchemaAddServices(builder, services)
def SchemaStartServicesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartServicesVector(builder, numElems):
return SchemaStartServicesVector(builder, numElems)
def SchemaAddAdvancedFeatures(builder, advancedFeatures):
builder.PrependUint64Slot(6, advancedFeatures, 0)
def AddAdvancedFeatures(builder, advancedFeatures):
SchemaAddAdvancedFeatures(builder, advancedFeatures)
def SchemaAddFbsFiles(builder, fbsFiles):
builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(fbsFiles), 0)
def AddFbsFiles(builder, fbsFiles):
SchemaAddFbsFiles(builder, fbsFiles)
def SchemaStartFbsFilesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartFbsFilesVector(builder, numElems):
return SchemaStartFbsFilesVector(builder, numElems)
def SchemaEnd(builder):
return builder.EndObject()
def End(builder):
    """Deprecated non-prefixed alias; forwards to `SchemaEnd`."""
    return SchemaEnd(builder)
| Schema |
python | langchain-ai__langchain | libs/partners/groq/langchain_groq/chat_models.py | {
"start": 2333,
"end": 61216
} | class ____(BaseChatModel):
r"""Groq Chat large language models API.
To use, you should have the
environment variable `GROQ_API_KEY` set with your API key.
Any parameters that are valid to be passed to the groq.create call
can be passed in, even if not explicitly saved on this class.
Setup:
Install `langchain-groq` and set environment variable
`GROQ_API_KEY`.
```bash
pip install -U langchain-groq
export GROQ_API_KEY="your-api-key"
```
Key init args — completion params:
model:
Name of Groq model to use, e.g. `llama-3.1-8b-instant`.
temperature:
Sampling temperature. Ranges from `0.0` to `1.0`.
max_tokens:
Max number of tokens to generate.
reasoning_format:
The format for reasoning output. Groq will default to `raw` if left
undefined.
- `'parsed'`: Separates reasoning into a dedicated field while keeping the
response concise. Reasoning will be returned in the
`additional_kwargs.reasoning_content` field of the response.
- `'raw'`: Includes reasoning within think tags (e.g.
`<think>{reasoning_content}</think>`).
- `'hidden'`: Returns only the final answer content. Note: this only
suppresses reasoning content in the response; the model will still perform
reasoning unless overridden in `reasoning_effort`.
See the [Groq documentation](https://console.groq.com/docs/reasoning#reasoning)
for more details and a list of supported models.
model_kwargs:
Holds any model parameters valid for create call not
explicitly specified.
Key init args — client params:
timeout:
Timeout for requests.
max_retries:
Max number of retries.
api_key:
Groq API key. If not passed in will be read from env var `GROQ_API_KEY`.
base_url:
Base URL path for API requests, leave blank if not using a proxy
or service emulator.
custom_get_token_ids:
Optional encoder to use for counting tokens.
See full list of supported init args and their descriptions in the params
section.
Instantiate:
```python
from langchain_groq import ChatGroq
model = ChatGroq(
model="llama-3.1-8b-instant",
temperature=0.0,
max_retries=2,
# other params...
)
```
Invoke:
```python
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
model.invoke(messages)
```
```python
AIMessage(content='The English sentence "I love programming" can
be translated to French as "J\'aime programmer". The word
"programming" is translated as "programmer" in French.',
response_metadata={'token_usage': {'completion_tokens': 38,
'prompt_tokens': 28, 'total_tokens': 66, 'completion_time':
0.057975474, 'prompt_time': 0.005366091, 'queue_time': None,
'total_time': 0.063341565}, 'model_name': 'llama-3.1-8b-instant',
'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop',
'logprobs': None}, id='run-ecc71d70-e10c-4b69-8b8c-b8027d95d4b8-0')
```
Stream:
```python
# Streaming `text` for each content chunk received
for chunk in model.stream(messages):
print(chunk.text, end="")
```
```python
content='' id='run-4e9f926b-73f5-483b-8ef5-09533d925853'
content='The' id='run-4e9f926b-73f5-483b-8ef5-09533d925853'
content=' English' id='run-4e9f926b-73f5-483b-8ef5-09533d925853'
content=' sentence' id='run-4e9f926b-73f5-483b-8ef5-09533d925853'
...
content=' program' id='run-4e9f926b-73f5-483b-8ef5-09533d925853'
content='".' id='run-4e9f926b-73f5-483b-8ef5-09533d925853'
content='' response_metadata={'finish_reason': 'stop'}
id='run-4e9f926b-73f5-483b-8ef5-09533d925853
```
```python
# Reconstructing a full response
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
```
```python
AIMessageChunk(content='The English sentence "I love programming"
can be translated to French as "J\'aime programmer". Here\'s the
breakdown of the sentence: "J\'aime" is the French equivalent of "
I love", and "programmer" is the French infinitive for "to program".
So, the literal translation is "I love to program". However, in
English we often omit the "to" when talking about activities we
love, and the same applies to French. Therefore, "J\'aime
programmer" is the correct and natural way to express "I love
programming" in French.', response_metadata={'finish_reason':
'stop'}, id='run-a3c35ac4-0750-4d08-ac55-bfc63805de76')
```
Async:
```python
await model.ainvoke(messages)
```
```python
AIMessage(content='The English sentence "I love programming" can
be translated to French as "J\'aime programmer". The word
"programming" is translated as "programmer" in French. I hope
this helps! Let me know if you have any other questions.',
response_metadata={'token_usage': {'completion_tokens': 53,
'prompt_tokens': 28, 'total_tokens': 81, 'completion_time':
0.083623752, 'prompt_time': 0.007365126, 'queue_time': None,
'total_time': 0.090988878}, 'model_name': 'llama-3.1-8b-instant',
'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop',
'logprobs': None}, id='run-897f3391-1bea-42e2-82e0-686e2367bcf8-0')
```
Tool calling:
```python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke("What is the population of NY?")
ai_msg.tool_calls
```
```python
[
{
"name": "GetPopulation",
"args": {"location": "NY"},
"id": "call_bb8d",
}
]
```
See `ChatGroq.bind_tools()` method for more.
Structured output:
```python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(description="How funny the joke is, from 1 to 10")
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
```python
Joke(
setup="Why don't cats play poker in the jungle?",
punchline="Too many cheetahs!",
rating=None,
)
```
See `ChatGroq.with_structured_output()` for more.
Response metadata:
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
```python
{
"token_usage": {
"completion_tokens": 70,
"prompt_tokens": 28,
"total_tokens": 98,
"completion_time": 0.111956391,
"prompt_time": 0.007518279,
"queue_time": None,
"total_time": 0.11947467,
},
"model_name": "llama-3.1-8b-instant",
"system_fingerprint": "fp_c5f20b5bb1",
"finish_reason": "stop",
"logprobs": None,
}
```
""" # noqa: E501
client: Any = Field(default=None, exclude=True)
async_client: Any = Field(default=None, exclude=True)
model_name: str = Field(alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
stop: list[str] | str | None = Field(default=None, alias="stop_sequences")
"""Default stop sequences."""
reasoning_format: Literal["parsed", "raw", "hidden"] | None = Field(default=None)
"""The format for reasoning output. Groq will default to raw if left undefined.
- `'parsed'`: Separates reasoning into a dedicated field while keeping the
response concise. Reasoning will be returned in the
`additional_kwargs.reasoning_content` field of the response.
- `'raw'`: Includes reasoning within think tags (e.g.
`<think>{reasoning_content}</think>`).
- `'hidden'`: Returns only the final answer content. Note: this only suppresses
reasoning content in the response; the model will still perform reasoning unless
overridden in `reasoning_effort`.
See the [Groq documentation](https://console.groq.com/docs/reasoning#reasoning)
for more details and a list of supported models.
"""
reasoning_effort: str | None = Field(default=None)
"""The level of effort the model will put into reasoning. Groq will default to
enabling reasoning if left undefined.
See the [Groq documentation](https://console.groq.com/docs/reasoning#options-for-reasoning-effort)
for more details and a list of options and models that support setting a reasoning
effort.
"""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
groq_api_key: SecretStr | None = Field(
alias="api_key", default_factory=secret_from_env("GROQ_API_KEY", default=None)
)
"""Automatically inferred from env var `GROQ_API_KEY` if not provided."""
groq_api_base: str | None = Field(
alias="base_url", default_factory=from_env("GROQ_API_BASE", default=None)
)
"""Base URL path for API requests. Leave blank if not using a proxy or service
emulator.
"""
# to support explicit proxy for Groq
groq_proxy: str | None = Field(default_factory=from_env("GROQ_PROXY", default=None))
request_timeout: float | tuple[float, float] | Any | None = Field(
default=None, alias="timeout"
)
"""Timeout for requests to Groq completion API. Can be float, `httpx.Timeout` or
`None`.
"""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int | None = None
"""Maximum number of tokens to generate."""
service_tier: Literal["on_demand", "flex", "auto"] = Field(default="on_demand")
"""Optional parameter that you can include to specify the service tier you'd like to
use for requests.
- `'on_demand'`: Default.
- `'flex'`: On-demand processing when capacity is available, with rapid timeouts
if resources are constrained. Provides balance between performance and
reliability for workloads that don't require guaranteed processing.
- `'auto'`: Uses on-demand rate limits, then falls back to `'flex'` if those
limits are exceeded
See the [Groq documentation](https://console.groq.com/docs/flex-processing) for more
details and a list of service tiers and descriptions.
"""
default_headers: Mapping[str, str] | None = None
default_query: Mapping[str, object] | None = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Any | None = None
"""Optional `httpx.Client`."""
http_async_client: Any | None = None
"""Optional `httpx.AsyncClient`. Only used for async invocations. Must specify
`http_client` as well if you'd like a custom client for sync invocations."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
msg = f"Found {field_name} supplied twice."
raise ValueError(msg)
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended.""",
stacklevel=2,
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
msg = (
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
raise ValueError(msg)
values["model_kwargs"] = extra
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if self.n < 1:
msg = "n must be at least 1."
raise ValueError(msg)
if self.n > 1 and self.streaming:
msg = "n must be 1 when streaming."
raise ValueError(msg)
if self.temperature == 0:
self.temperature = 1e-8
default_headers = {"User-Agent": f"langchain/{__version__}"} | dict(
self.default_headers or {}
)
client_params: dict[str, Any] = {
"api_key": (
self.groq_api_key.get_secret_value() if self.groq_api_key else None
),
"base_url": self.groq_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": default_headers,
"default_query": self.default_query,
}
try:
import groq # noqa: PLC0415
sync_specific: dict[str, Any] = {"http_client": self.http_client}
if not self.client:
self.client = groq.Groq(
**client_params, **sync_specific
).chat.completions
if not self.async_client:
async_specific: dict[str, Any] = {"http_client": self.http_async_client}
self.async_client = groq.AsyncGroq(
**client_params, **async_specific
).chat.completions
except ImportError as exc:
msg = (
"Could not import groq python package. "
"Please install it with `pip install groq`."
)
raise ImportError(msg) from exc
return self
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None:
self.profile = _get_default_model_profile(self.model_name)
return self
#
# Serializable class method overrides
#
@property
def lc_secrets(self) -> dict[str, str]:
"""Mapping of secret environment variables."""
return {"groq_api_key": "GROQ_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by LangChain."""
return True
#
# BaseChatModel method overrides
#
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "groq-chat"
def _get_ls_params(
self, stop: list[str] | None = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="groq",
ls_model_name=params.get("model", self.model_name),
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("max_tokens", self.max_tokens):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None) or self.stop:
ls_params["ls_stop"] = ls_stop if isinstance(ls_stop, list) else [ls_stop]
return ls_params
def _should_stream(
self,
*,
async_api: bool,
run_manager: CallbackManagerForLLMRun
| AsyncCallbackManagerForLLMRun
| None = None,
**kwargs: Any,
) -> bool:
"""Determine if a given model call should hit the streaming API."""
base_should_stream = super()._should_stream(
async_api=async_api, run_manager=run_manager, **kwargs
)
if base_should_stream and ("response_format" in kwargs):
# Streaming not supported in JSON mode or structured outputs.
response_format = kwargs["response_format"]
if isinstance(response_format, dict) and response_format.get("type") in {
"json_schema",
"json_object",
}:
return False
return base_should_stream
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**kwargs,
}
response = self.client.create(messages=message_dicts, **params)
return self._create_chat_result(response, params)
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**kwargs,
}
response = await self.async_client.create(messages=message_dicts, **params)
return self._create_chat_result(response, params)
def _stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
for chunk in self.client.create(messages=message_dicts, **params):
if not isinstance(chunk, dict):
chunk = chunk.model_dump() # noqa: PLW2901
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
generation_info["model_name"] = self.model_name
if system_fingerprint := chunk.get("system_fingerprint"):
generation_info["system_fingerprint"] = system_fingerprint
service_tier = params.get("service_tier") or self.service_tier
generation_info["service_tier"] = service_tier
reasoning_effort = (
params.get("reasoning_effort") or self.reasoning_effort
)
if reasoning_effort:
generation_info["reasoning_effort"] = reasoning_effort
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
if generation_info:
message_chunk = message_chunk.model_copy(
update={"response_metadata": generation_info}
)
default_chunk_class = message_chunk.__class__
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
if run_manager:
run_manager.on_llm_new_token(
generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
)
yield generation_chunk
async def _astream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
async for chunk in await self.async_client.create(
messages=message_dicts, **params
):
if not isinstance(chunk, dict):
chunk = chunk.model_dump() # noqa: PLW2901
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
generation_info["model_name"] = self.model_name
if system_fingerprint := chunk.get("system_fingerprint"):
generation_info["system_fingerprint"] = system_fingerprint
service_tier = params.get("service_tier") or self.service_tier
generation_info["service_tier"] = service_tier
reasoning_effort = (
params.get("reasoning_effort") or self.reasoning_effort
)
if reasoning_effort:
generation_info["reasoning_effort"] = reasoning_effort
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
if generation_info:
message_chunk = message_chunk.model_copy(
update={"response_metadata": generation_info}
)
default_chunk_class = message_chunk.__class__
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
if run_manager:
await run_manager.on_llm_new_token(
token=generation_chunk.text,
chunk=generation_chunk,
logprobs=logprobs,
)
yield generation_chunk
#
# Internal methods
#
@property
def _default_params(self) -> dict[str, Any]:
"""Get the default parameters for calling Groq API."""
params = {
"model": self.model_name,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"stop": self.stop,
"reasoning_format": self.reasoning_format,
"reasoning_effort": self.reasoning_effort,
"service_tier": self.service_tier,
**self.model_kwargs,
}
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params
def _create_chat_result(
self, response: dict | BaseModel, params: dict
) -> ChatResult:
generations = []
if not isinstance(response, dict):
response = response.model_dump()
token_usage = response.get("usage", {})
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
if token_usage and isinstance(message, AIMessage):
message.usage_metadata = _create_usage_metadata(token_usage)
generation_info = {"finish_reason": res.get("finish_reason")}
if "logprobs" in res:
generation_info["logprobs"] = res["logprobs"]
gen = ChatGeneration(
message=message,
generation_info=generation_info,
)
generations.append(gen)
llm_output = {
"token_usage": token_usage,
"model_name": self.model_name,
"system_fingerprint": response.get("system_fingerprint", ""),
}
llm_output["service_tier"] = params.get("service_tier") or self.service_tier
reasoning_effort = params.get("reasoning_effort") or self.reasoning_effort
if reasoning_effort:
llm_output["reasoning_effort"] = reasoning_effort
return ChatResult(generations=generations, llm_output=llm_output)
def _create_message_dicts(
self, messages: list[BaseMessage], stop: list[str] | None
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
params = self._default_params
if stop is not None:
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _combine_llm_outputs(self, llm_outputs: list[dict | None]) -> dict:
overall_token_usage: dict = {}
system_fingerprint = None
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
if token_usage is not None:
for k, v in token_usage.items():
if k in overall_token_usage and v is not None:
# Handle nested dictionaries
if isinstance(v, dict):
if k not in overall_token_usage:
overall_token_usage[k] = {}
for nested_k, nested_v in v.items():
if (
nested_k in overall_token_usage[k]
and nested_v is not None
):
overall_token_usage[k][nested_k] += nested_v
else:
overall_token_usage[k][nested_k] = nested_v
else:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
if system_fingerprint is None:
system_fingerprint = output.get("system_fingerprint")
combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
if system_fingerprint:
combined["system_fingerprint"] = system_fingerprint
if self.service_tier:
combined["service_tier"] = self.service_tier
return combined
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable | BaseTool],
*,
tool_choice: dict | str | bool | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Supports any tool definition handled by
`langchain_core.utils.function_calling.convert_to_openai_tool`.
tool_choice: Which tool to require the model to call.
Must be the name of the single provided function,
`'auto'` to automatically determine which function to call
with the option to not call any function, `'any'` to enforce that some
function is called, or a dict of the form:
`{"type": "function", "function": {"name": <<tool_name>>}}`.
**kwargs: Any additional parameters to pass to the
`langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice is not None and tool_choice:
if tool_choice == "any":
tool_choice = "required"
if isinstance(tool_choice, str) and (
tool_choice not in ("auto", "none", "required")
):
tool_choice = {"type": "function", "function": {"name": tool_choice}}
if isinstance(tool_choice, bool):
if len(tools) > 1:
msg = (
"tool_choice can only be True when there is one tool. Received "
f"{len(tools)} tools."
)
raise ValueError(msg)
tool_name = formatted_tools[0]["function"]["name"]
tool_choice = {
"type": "function",
"function": {"name": tool_name},
}
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: dict | type[BaseModel] | None = None,
*,
method: Literal[
"function_calling", "json_mode", "json_schema"
] = "function_calling",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
r"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- An OpenAI function/tool schema,
- A JSON Schema,
- A `TypedDict` class,
- Or a Pydantic class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
!!! warning "Behavior changed in `langchain-groq` 0.3.8"
Added support for Groq's dedicated structured output feature via
`method="json_schema"`.
method: The method for steering model generation, one of:
- `'function_calling'`:
Uses Groq's tool-calling [API](https://console.groq.com/docs/tool-use)
- `'json_schema'`:
Uses Groq's [Structured Output API](https://console.groq.com/docs/structured-outputs).
Supported for a subset of models, including `openai/gpt-oss`,
`moonshotai/kimi-k2-instruct-0905`, and some `meta-llama/llama-4`
models. See [docs](https://console.groq.com/docs/structured-outputs)
for details.
- `'json_mode'`:
Uses Groq's [JSON mode](https://console.groq.com/docs/structured-outputs#json-object-mode).
Note that if using JSON mode then you must include instructions for
formatting the output into the desired schema into the model call
Learn more about the differences between the methods and which models
support which methods [here](https://console.groq.com/docs/structured-outputs).
method:
The method for steering model generation, either `'function_calling'`
or `'json_mode'`. If `'function_calling'` then the schema will be converted
to an OpenAI function and the returned model will make use of the
function-calling API. If `'json_mode'` then JSON mode will be used.
!!! note
If using `'json_mode'` then you must include instructions for formatting
the output into the desired schema into the model call. (either via the
prompt itself or in the system message/prompt/instructions).
!!! warning
`'json_mode'` does not support streaming responses stop sequences.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
kwargs:
Any additional parameters to pass to the `langchain.runnable.Runnable`
constructor.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
Example: schema=Pydantic class, method="function_calling", include_raw=False:
```python
from typing import Optional
from langchain_groq import ChatGroq
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
# If we provide default values and/or descriptions for fields, these will be passed
# to the model. This is an important part of improving a model's ability to
# correctly return structured outputs.
justification: str | None = Field(default=None, description="A justification for the answer.")
model = ChatGroq(model="openai/gpt-oss-120b", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
Example: schema=Pydantic class, method="function_calling", include_raw=True:
```python
from langchain_groq import ChatGroq
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatGroq(model="openai/gpt-oss-120b", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification,
include_raw=True,
)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
Example: schema=TypedDict class, method="function_calling", include_raw=False:
```python
from typing_extensions import Annotated, TypedDict
from langchain_groq import ChatGroq
class AnswerWithJustification(TypedDict):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: Annotated[str | None, None, "A justification for the answer."]
model = ChatGroq(model="openai/gpt-oss-120b", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
```python
from langchain_groq import ChatGroq
oai_schema = {
'name': 'AnswerWithJustification',
'description': 'An answer to the user question along with justification for the answer.',
'parameters': {
'type': 'object',
'properties': {
'answer': {'type': 'string'},
'justification': {'description': 'A justification for the answer.', 'type': 'string'}
},
'required': ['answer']
}
model = ChatGroq(model="openai/gpt-oss-120b", temperature=0)
structured_model = model.with_structured_output(oai_schema)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
Example: schema=Pydantic class, method="json_schema", include_raw=False:
```python
from typing import Optional
from langchain_groq import ChatGroq
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
# If we provide default values and/or descriptions for fields, these will be passed
# to the model. This is an important part of improving a model's ability to
# correctly return structured outputs.
justification: str | None = Field(default=None, description="A justification for the answer.")
model = ChatGroq(model="openai/gpt-oss-120b", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification,
method="json_schema",
)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
Example: schema=Pydantic class, method="json_mode", include_raw=True:
```python
from langchain_groq import ChatGroq
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
justification: str
model = ChatGroq(model="openai/gpt-oss-120b", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\n "answer": "They are both the same weight.",\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'),
# 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
# 'parsing_error': None
# }
```
""" # noqa: E501
_ = kwargs.pop("strict", None)
if kwargs:
msg = f"Received unsupported arguments {kwargs}"
raise ValueError(msg)
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
msg = (
"schema must be specified when method is 'function_calling'. "
"Received None."
)
raise ValueError(msg)
formatted_tool = convert_to_openai_tool(schema)
tool_name = formatted_tool["function"]["name"]
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": formatted_tool,
},
)
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
elif method == "json_schema":
# Use structured outputs (json_schema) for models that support it
# Convert schema to JSON Schema format for structured outputs
if schema is None:
msg = (
"schema must be specified when method is 'json_schema'. "
"Received None."
)
raise ValueError(msg)
json_schema = convert_to_json_schema(schema)
schema_name = json_schema.get("title", "")
response_format = {
"type": "json_schema",
"json_schema": {"name": schema_name, "schema": json_schema},
}
ls_format_info = {
"kwargs": {"method": "json_schema"},
"schema": json_schema,
}
llm = self.bind(
response_format=response_format,
ls_structured_output_format=ls_format_info,
)
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[type-var, arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
ls_structured_output_format={
"kwargs": {"method": "json_mode"},
"schema": schema,
},
)
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[type-var, arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
else:
msg = (
f"Unrecognized method argument. Expected one of 'function_calling' or "
f"'json_mode'. Received: '{method}'"
)
raise ValueError(msg)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser
def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and is_basemodel_subclass(obj)
#
# Type conversion helpers
#
def _convert_message_to_dict(message: BaseMessage) -> dict:
    """Convert a LangChain message to a Groq chat-completions request dictionary.

    Args:
        message: The LangChain message.

    Returns:
        The dictionary, with at least ``role`` and ``content`` keys, plus
        ``function_call`` / ``tool_calls`` / ``name`` / ``tool_call_id``
        entries when the message carries them.

    Raises:
        TypeError: If ``message`` is not one of the supported message types.
    """
    message_dict: dict[str, Any]
    if isinstance(message, ChatMessage):
        # ChatMessage carries an arbitrary role; pass it through unchanged.
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        # Translate v1 content
        if message.response_metadata.get("output_version") == "v1":
            new_content, new_additional_kwargs = _convert_from_v1_to_groq(
                message.content_blocks, message.response_metadata.get("model_provider")
            )
            # model_copy: work on a copy so the caller's message is not mutated.
            message = message.model_copy(
                update={
                    "content": new_content,
                    "additional_kwargs": new_additional_kwargs,
                }
            )
        message_dict = {"role": "assistant", "content": message.content}
        # If content is a list of content blocks, filter out tool_call blocks
        # as Groq API only accepts 'text' type blocks in content
        if isinstance(message.content, list):
            text_blocks = [
                block
                for block in message.content
                if isinstance(block, dict) and block.get("type") == "text"
            ]
            message_dict["content"] = text_blocks if text_blocks else ""
        if "function_call" in message.additional_kwargs:
            message_dict["function_call"] = message.additional_kwargs["function_call"]
            # If function call only, content is None not empty string
            if message_dict["content"] == "":
                message_dict["content"] = None
        if message.tool_calls or message.invalid_tool_calls:
            # Prefer the structured tool_calls/invalid_tool_calls attributes;
            # the additional_kwargs fallback below only runs when both are empty.
            message_dict["tool_calls"] = [
                _lc_tool_call_to_groq_tool_call(tc) for tc in message.tool_calls
            ] + [
                _lc_invalid_tool_call_to_groq_tool_call(tc)
                for tc in message.invalid_tool_calls
            ]
            # If tool calls only (no text blocks), content is None not empty string
            if message_dict["content"] == "" or (
                isinstance(message_dict["content"], list)
                and not message_dict["content"]
            ):
                message_dict["content"] = None
        elif "tool_calls" in message.additional_kwargs:
            message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
            # If tool calls only, content is None not empty string
            if message_dict["content"] == "" or (
                isinstance(message_dict["content"], list)
                and not message_dict["content"]
            ):
                message_dict["content"] = None
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, FunctionMessage):
        message_dict = {
            "role": "function",
            "content": message.content,
            "name": message.name,
        }
    elif isinstance(message, ToolMessage):
        message_dict = {
            "role": "tool",
            "content": message.content,
            "tool_call_id": message.tool_call_id,
        }
    else:
        msg = f"Got unknown type {message}"
        raise TypeError(msg)
    if "name" in message.additional_kwargs:
        # Optional participant name, honored for any message type that set it.
        message_dict["name"] = message.additional_kwargs["name"]
    return message_dict
def _convert_chunk_to_message_chunk(
    chunk: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
    """Convert one streamed Groq response chunk into a LangChain message chunk.

    Args:
        chunk: Raw streaming payload; only ``chunk["choices"][0]["delta"]`` and
            the optional ``chunk["x_groq"]["usage"]`` block are read.
        default_class: Chunk class to fall back on when the delta has no
            ``role`` field.

    Returns:
        A message chunk whose concrete class is chosen from the delta's
        ``role`` (falling back to ``default_class``).
    """
    choice = chunk["choices"][0]
    _dict = choice["delta"]
    role = cast("str", _dict.get("role"))
    content = cast("str", _dict.get("content") or "")
    additional_kwargs: dict = {}
    if _dict.get("function_call"):
        function_call = dict(_dict["function_call"])
        # Normalize a null function name to an empty string for downstream use.
        if "name" in function_call and function_call["name"] is None:
            function_call["name"] = ""
        additional_kwargs["function_call"] = function_call
    if _dict.get("tool_calls"):
        # Groq sends 'null' (JSON null) for tools with no arguments, but we
        # expect '{}' (empty JSON object) to represent empty arguments
        tool_calls = _dict["tool_calls"]
        for tool_call in tool_calls:
            if (
                tool_call.get("function")
                and tool_call["function"].get("arguments") == "null"
            ):
                tool_call["function"]["arguments"] = "{}"
        additional_kwargs["tool_calls"] = tool_calls
    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    if role == "assistant" or default_class == AIMessageChunk:
        if reasoning := _dict.get("reasoning"):
            additional_kwargs["reasoning_content"] = reasoning
        if executed_tools := _dict.get("executed_tools"):
            additional_kwargs["executed_tools"] = []
            for executed_tool in executed_tools:
                if executed_tool.get("output"):
                    # Tool output duplicates query and other server tool call data
                    additional_kwargs["executed_tools"].append(
                        {
                            k: executed_tool[k]
                            for k in ("index", "output")
                            if k in executed_tool
                        }
                    )
                else:
                    additional_kwargs["executed_tools"].append(
                        {k: executed_tool[k] for k in executed_tool if k != "output"}
                    )
        # Usage metadata, when present, arrives under the "x_groq" key.
        if usage := (chunk.get("x_groq") or {}).get("usage"):
            usage_metadata = _create_usage_metadata(usage)
        else:
            usage_metadata = None
        return AIMessageChunk(
            content=content,
            additional_kwargs=additional_kwargs,
            usage_metadata=usage_metadata,  # type: ignore[arg-type]
            response_metadata={"model_provider": "groq"},
        )
    if role == "system" or default_class == SystemMessageChunk:
        return SystemMessageChunk(content=content)
    if role == "function" or default_class == FunctionMessageChunk:
        return FunctionMessageChunk(content=content, name=_dict["name"])
    if role == "tool" or default_class == ToolMessageChunk:
        return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
    if role or default_class == ChatMessageChunk:
        # Any other non-empty role becomes a generic ChatMessageChunk.
        return ChatMessageChunk(content=content, role=role)
    return default_class(content=content)  # type: ignore[call-arg]
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
    """Convert a Groq response dictionary to a LangChain message.

    Args:
        _dict: The dictionary, keyed by ``role``/``content`` (plus optional
            ``id``, ``reasoning``, ``executed_tools``, ``function_call``,
            ``tool_calls``, ``name``, ``tool_call_id``).

    Returns:
        The LangChain message matching the dictionary's ``role``; unknown
        roles fall back to a generic ``ChatMessage``.
    """
    id_ = _dict.get("id")
    role = _dict.get("role")
    if role == "user":
        return HumanMessage(content=_dict.get("content", ""))
    if role == "assistant":
        content = _dict.get("content", "") or ""
        additional_kwargs: dict = {}
        if reasoning := _dict.get("reasoning"):
            additional_kwargs["reasoning_content"] = reasoning
        if executed_tools := _dict.get("executed_tools"):
            additional_kwargs["executed_tools"] = executed_tools
        if function_call := _dict.get("function_call"):
            additional_kwargs["function_call"] = dict(function_call)
        tool_calls = []
        invalid_tool_calls = []
        if raw_tool_calls := _dict.get("tool_calls"):
            # Groq sends 'null' (JSON null) for tools with no arguments, but we
            # expect '{}' (empty JSON object) to represent empty arguments
            for raw_tool_call in raw_tool_calls:
                if (
                    raw_tool_call.get("function")
                    and raw_tool_call["function"].get("arguments") == "null"
                ):
                    raw_tool_call["function"]["arguments"] = "{}"
            additional_kwargs["tool_calls"] = raw_tool_calls
            # Second pass: parse each call; failures are kept as invalid tool
            # calls rather than dropped, so callers can surface the error.
            for raw_tool_call in raw_tool_calls:
                try:
                    tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
                except Exception as e:  # pylint: disable=broad-except
                    invalid_tool_calls.append(
                        make_invalid_tool_call(raw_tool_call, str(e))
                    )
        return AIMessage(
            content=content,
            id=id_,
            additional_kwargs=additional_kwargs,
            tool_calls=tool_calls,
            invalid_tool_calls=invalid_tool_calls,
            response_metadata={"model_provider": "groq"},
        )
    if role == "system":
        return SystemMessage(content=_dict.get("content", ""))
    if role == "function":
        return FunctionMessage(content=_dict.get("content", ""), name=_dict.get("name"))  # type: ignore[arg-type]
    if role == "tool":
        additional_kwargs = {}
        if "name" in _dict:
            additional_kwargs["name"] = _dict["name"]
        return ToolMessage(
            content=_dict.get("content", ""),
            tool_call_id=_dict.get("tool_call_id"),
            additional_kwargs=additional_kwargs,
        )
    # Unknown role: preserve it verbatim on a generic ChatMessage.
    return ChatMessage(content=_dict.get("content", ""), role=role)  # type: ignore[arg-type]
def _lc_tool_call_to_groq_tool_call(tool_call: ToolCall) -> dict:
    """Serialize a LangChain ToolCall into the Groq ``tool_calls`` wire format.

    The structured ``args`` mapping is JSON-encoded (non-ASCII preserved), as
    the API expects ``arguments`` to be a string.
    """
    serialized_args = json.dumps(tool_call["args"], ensure_ascii=False)
    function_payload = {
        "name": tool_call["name"],
        "arguments": serialized_args,
    }
    return {
        "type": "function",
        "id": tool_call["id"],
        "function": function_payload,
    }
def _lc_invalid_tool_call_to_groq_tool_call(
    invalid_tool_call: InvalidToolCall,
) -> dict:
    """Serialize an InvalidToolCall into the Groq ``tool_calls`` wire format.

    Unlike the valid-call path, ``args`` is already a raw (unparsed) string
    here, so it is forwarded as-is without JSON encoding.
    """
    function_payload = {
        "name": invalid_tool_call["name"],
        "arguments": invalid_tool_call["args"],
    }
    return {
        "type": "function",
        "id": invalid_tool_call["id"],
        "function": function_payload,
    }
def _create_usage_metadata(groq_token_usage: dict) -> UsageMetadata:
    """Create usage metadata from Groq token usage response.

    Supports both naming schemes: the Responses API (``input_tokens`` /
    ``output_tokens`` / ``*_tokens_details``) and the Chat Completions API
    (``prompt_tokens`` / ``completion_tokens`` / ``prompt_tokens_details`` /
    ``completion_tokens_details``).

    Args:
        groq_token_usage: Token usage dict from Groq API response.

    Returns:
        Usage metadata dict with input/output token details.
    """

    def _coalesce(*keys: str, default: Any) -> Any:
        # First truthy value among the candidate keys, else the default.
        for key in keys:
            value = groq_token_usage.get(key)
            if value:
                return value
        return default

    input_tokens = _coalesce("input_tokens", "prompt_tokens", default=0)
    output_tokens = _coalesce("output_tokens", "completion_tokens", default=0)
    total_tokens = groq_token_usage.get("total_tokens") or input_tokens + output_tokens

    input_details = _coalesce(
        "input_tokens_details", "prompt_tokens_details", default={}
    )
    output_details = _coalesce(
        "output_tokens_details", "completion_tokens_details", default={}
    )

    usage_metadata: UsageMetadata = {
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "total_tokens": total_tokens,
    }
    # Detail sub-dicts are only attached when the underlying counts are truthy.
    if cache_read := input_details.get("cached_tokens"):
        usage_metadata["input_token_details"] = InputTokenDetails(cache_read=cache_read)  # type: ignore[typeddict-item]
    if reasoning := output_details.get("reasoning_tokens"):
        usage_metadata["output_token_details"] = OutputTokenDetails(reasoning=reasoning)  # type: ignore[typeddict-item]
    return usage_metadata
| ChatGroq |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 37278,
"end": 37831
} | class ____(ORMBaseModel):
"""An ORM representation of an agent"""
name: str = Field(
default_factory=lambda: generate_slug(2),
description=(
"The name of the agent. If a name is not provided, it will be"
" auto-generated."
),
)
work_queue_id: UUID = Field(
default=..., description="The work queue with which the agent is associated."
)
last_activity_time: Optional[DateTime] = Field(
default=None, description="The last time this agent polled for work."
)
| Agent |
python | geekcomputers__Python | Checker_game_by_dz/modules/checker.py | {
"start": 132,
"end": 1967
} | class ____:
def __init__(self, window):
self._init()
self.window = window
# to update the position
def update(self):
self.board.draw(self.window)
self.draw_moves(self.valid_moves)
pg.display.update()
def _init(self):
self.select = None
self.board = checker_board()
self.turn = black
self.valid_moves = {}
# to reset the position
def reset(self):
self._init()
# select row and column
def selectrc(self, row, col):
if self.select:
result = self._move(row, col)
if not result:
self.select = None
piece = self.board.get_piece(row, col)
if (piece != 0) and (piece.color == self.turn):
self.select = piece
self.valid_moves = self.board.get_valid_moves(piece)
return True
return False
# to move the pieces
def _move(self, row, col):
piece = self.board.get_piece(row, col)
if (self.select) and (piece == 0) and (row, col) in self.valid_moves:
self.board.move(self.select, row, col)
skip = self.valid_moves[(row, col)]
if skip:
self.board.remove(skip)
self.chg_turn()
else:
return False
return True
# to draw next possible move
def draw_moves(self, moves):
for move in moves:
row, col = move
pg.draw.circle(
self.window,
red,
(col * sq_size + sq_size // 2, row * sq_size + sq_size // 2),
15,
)
# for changing the turn
def chg_turn(self):
self.valid_moves = {}
if self.turn == black:
self.turn = white
else:
self.turn = black
| checker |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/llama_index/conftest.py | {
"start": 2450,
"end": 9213
} | class ____(BaseModel):
"""Table configuration for test parameterization.
:param existing: Whether the table should be created before running a test.
:param schema_name: Schema where the table resides.
:param table_name: Name of the table.
:param id_column: Primary key column name (uuid).
:param content_column: Text content column name.
:param embedding_column: Vector/embedding column name.
:param embedding_type: Embedding type (e.g., "vector").
:param embedding_dimension: Embedding dimension length.
:param metadata_column: List of metadata column names or (name, type) tuples.
"""
existing: bool
schema_name: str
table_name: str
id_column: str
content_column: str
embedding_column: str
embedding_type: VectorType
embedding_dimension: PositiveInt
embedding_index: Algorithm | None
metadata_column: str
@pytest.fixture(**_FIXTURE_PARAMS_TABLE)
def table(
connection_pool: ConnectionPool,
schema: str,
request: pytest.FixtureRequest,
) -> Generator[Table, Any, None]:
"""Fixture to provide a parametrized table configuration for synchronous tests.
This fixture yields a `Table` model with normalized metadata columns. When
the parameter `existing` is `True`, it creates the table in the provided
schema before yielding and drops it after the test class completes.
:param connection_pool: The synchronous connection pool to use for DDL.
:type connection_pool: ConnectionPool
:param schema: The schema name where the table should be created.
:type schema: str
:param request: The pytest request object providing parametrization.
:type request: pytest.FixtureRequest
:return: A generator yielding a `Table` configuration.
:rtype: Generator[Table, Any, None]
"""
assert isinstance(request.param, dict), "Request param must be a dictionary"
table = Table(
existing=request.param.get("existing", None),
schema_name=schema,
table_name=request.param.get("table_name", "llamaindex"),
id_column=request.param.get("id_column", "id"),
content_column=request.param.get("content_column", "content"),
embedding_column=request.param.get("embedding_column", "embedding"),
embedding_type=request.param.get("embedding_type", "vector"),
embedding_dimension=request.param.get("embedding_dimension", 1_536),
embedding_index=request.param.get("embedding_index", None),
metadata_column=request.param.get("metadata_column", "metadata"),
)
if table.existing:
with connection_pool.connection() as conn, conn.cursor() as cur:
cur.execute(
sql.SQL(
"""
create table {table_name} (
{id_column} uuid primary key,
{content_column} text,
{embedding_column} {embedding_type}({embedding_dimension}),
{metadata_column} jsonb
)
"""
).format(
table_name=sql.Identifier(schema, table.table_name),
id_column=sql.Identifier(table.id_column),
content_column=sql.Identifier(table.content_column),
embedding_column=sql.Identifier(table.embedding_column),
embedding_type=sql.Identifier(table.embedding_type),
embedding_dimension=sql.Literal(table.embedding_dimension),
metadata_column=sql.Identifier(table.metadata_column),
)
)
yield table
with connection_pool.connection() as conn, conn.cursor() as cur:
cur.execute(
sql.SQL("drop table {table} cascade").format(
table=sql.Identifier(schema, table.table_name)
)
)
@pytest.fixture(
params=[
"filter1",
"filter2",
]
)
def filters(
request: pytest.FixtureRequest,
) -> MetadataFilters | None:
"""Define filters for various queries."""
if request.param == "filter1":
vsfilters = MetadataFilters(
filters=[MetadataFilter(key="metadata_column2", value="3", operator="!=")],
condition="and",
)
elif request.param == "filter2":
vsfilters = MetadataFilters(
filters=[
MetadataFilters(
filters=[
MetadataFilter(
key="metadata_column1", value="not-text", operator="!="
),
],
condition="or",
),
MetadataFilters(
filters=[
MetadataFilter(
key="metadata_column2", value="3", operator="!="
),
],
condition="and",
),
],
condition="and",
)
else:
return None
return vsfilters
@pytest.fixture
def vectorstore(connection_pool: ConnectionPool, table: Table) -> AzurePGVectorStore:
"""Define vectorstore with DiskANN."""
diskann = DiskANN(
op_class="vector_cosine_ops", max_neighbors=32, l_value_ib=100, l_value_is=100
)
vector_store = AzurePGVectorStore.from_params(
connection_pool=connection_pool,
schema_name=table.schema_name,
table_name=table.table_name,
embed_dim=table.embedding_dimension,
embedding_index=diskann,
)
# add several documents with deterministic embeddings for testing similarity
dim = int(table.embedding_dimension)
nodes = []
n1 = Node()
n1.node_id = "00000000-0000-0000-0000-000000000001"
n1.set_content("Text 1 about cats")
n1.embedding = [1.0] * dim
n1.metadata = {"metadata_column1": "text1", "metadata_column2": 1}
nodes.append(n1)
n2 = Node()
n2.node_id = "00000000-0000-0000-0000-000000000002"
n2.set_content("Text 2 about tigers")
# tigers should be close to cats
n2.embedding = [0.95] * dim
n2.metadata = {"metadata_column1": "text2", "metadata_column2": 2}
nodes.append(n2)
n3 = Node()
n3.node_id = "00000000-0000-0000-0000-000000000003"
n3.set_content("Text 3 about dogs")
n3.embedding = [0.3] * dim
n3.metadata = {"metadata_column1": "text3", "metadata_column2": 3}
nodes.append(n3)
n4 = Node()
n4.node_id = "00000000-0000-0000-0000-000000000004"
n4.set_content("Text 4 about plants")
n4.embedding = [-1.0] * dim
n4.metadata = {"metadata_column1": "text4", "metadata_column2": 4}
nodes.append(n4)
vector_store.add(nodes)
return vector_store
| Table |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial003_py39.py | {
"start": 120,
"end": 361
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
heroes: list["Hero"] = Relationship(back_populates="team", passive_deletes="all")
| Team |
python | django__django | django/template/defaulttags.py | {
"start": 29381,
"end": 29677
} | class ____(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
| TemplateLiteral |
python | huggingface__transformers | src/transformers/models/visual_bert/modeling_visual_bert.py | {
"start": 59524,
"end": 67402
} | class ____(VisualBertPreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "visual_bert.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
self.visual_bert = VisualBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = VisualBertPreTrainingHeads(config)
self.attention = VisualBertRegionToPhraseAttention(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
visual_embeds: Optional[torch.FloatTensor] = None,
visual_attention_mask: Optional[torch.LongTensor] = None,
visual_token_type_ids: Optional[torch.LongTensor] = None,
image_text_alignment: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
region_to_phrase_position: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*):
The embedded representation of the visual inputs, generally derived using using an object detector.
visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*):
Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*):
Segment token indices to indicate different portions of the visual embeds.
[What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the
*visual_token_type_ids* to *1* for all tokens.
image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*):
Image-Text alignment uses to decide the position IDs of the visual embeddings.
region_to_phrase_position (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):
The positions depicting the position of the image embedding corresponding to the textual tokens.
labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length, visual_sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and the
outputs from the attention layer.
Example:
```python
# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.
from transformers import AutoTokenizer, VisualBertForRegionToPhraseAlignment
import torch
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = VisualBertForRegionToPhraseAlignment.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
text = "Who is eating the apple?"
inputs = tokenizer(text, return_tensors="pt")
visual_embeds = get_visual_embeddings(image).unsqueeze(0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
region_to_phrase_position = torch.ones((1, inputs["input_ids"].shape[-1] + visual_embeds.shape[-2]))
inputs.update(
{
"region_to_phrase_position": region_to_phrase_position,
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
}
)
labels = torch.ones(
(1, inputs["input_ids"].shape[-1] + visual_embeds.shape[-2], visual_embeds.shape[-2])
) # Batch size 1
outputs = model(**inputs, labels=labels)
loss = outputs.loss
scores = outputs.logits
```"""
if region_to_phrase_position is None:
raise ValueError("`region_to_phrase_position` should not be None when using Flickr Model.")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
region_to_phrase_position_mask = (region_to_phrase_position != -1).long()
# Make the -1 become 0
region_to_phrase_position = region_to_phrase_position * region_to_phrase_position_mask
# Selected_positions = batch x selected position x dim
expanded_region_to_phrase_positions = region_to_phrase_position.unsqueeze(2).expand(
region_to_phrase_position.size(0), region_to_phrase_position.size(1), sequence_output.size(2)
)
selected_positions = sequence_output.gather(1, expanded_region_to_phrase_positions)
# Visual Features = batch x visual_feature_length x dim
# This will need separate image and visual masks.
visual_features = sequence_output[:, attention_mask.size(1) :]
if visual_features.size(1) != visual_attention_mask.size(1):
raise ValueError(
f"Visual features length :{visual_features.size(1)} should be the same"
f" as visual attention mask length: {visual_attention_mask.size(1)}."
)
logits = self.attention(selected_positions, visual_features, visual_attention_mask)
loss = None
if labels is not None:
# scores = batch x selected position x visual_feature
# scores = selected_positions.bmm(visual_features.transpose(1,2))
# label = batch x selected_position x needed position
loss_fct = KLDivLoss(reduction="batchmean")
log_softmax = LogSoftmax(dim=-1)
scores = log_softmax(logits)
labels = labels.contiguous()
loss = loss_fct(scores, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"VisualBertForMultipleChoice",
"VisualBertForPreTraining",
"VisualBertForQuestionAnswering",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertLayer",
"VisualBertModel",
"VisualBertPreTrainedModel",
]
| VisualBertForRegionToPhraseAlignment |
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 2802,
"end": 3399
} | class ____(Endpoint):
permission_classes = (AllowAny,)
enforce_rate_limit = True
rate_limits = RateLimitConfig(
group="foo",
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=20, window=1, concurrent_limit=1),
RateLimitCategory.USER: RateLimit(limit=20, window=1, concurrent_limit=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=20, window=1, concurrent_limit=1),
},
},
)
def get(self, request):
return Response({"ok": True})
| ConcurrentRateLimitedEndpoint |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 95577,
"end": 98201
} | class ____:
items = [
{"foo": "bar1", "egg": "spam1"},
{"foo": "bar2", "egg": "spam2", "baz": "quux2"},
{"foo": "bar3", "baz": "quux3"},
]
with tempfile.NamedTemporaryFile(suffix="json") as tmp:
settings = {
"FEEDS": {
f"file:///{tmp.name}": {
"format": "json",
},
},
}
def feed_exporter_closed_signal_handler(self):
self.feed_exporter_closed_received = True
def feed_slot_closed_signal_handler(self, slot):
self.feed_slot_closed_received = True
def feed_exporter_closed_signal_handler_deferred(self):
d = defer.Deferred()
d.addCallback(lambda _: setattr(self, "feed_exporter_closed_received", True))
d.callback(None)
return d
def feed_slot_closed_signal_handler_deferred(self, slot):
d = defer.Deferred()
d.addCallback(lambda _: setattr(self, "feed_slot_closed_received", True))
d.callback(None)
return d
def run_signaled_feed_exporter(
self, feed_exporter_signal_handler, feed_slot_signal_handler
):
crawler = get_crawler(settings_dict=self.settings)
feed_exporter = FeedExporter.from_crawler(crawler)
spider = scrapy.Spider("default")
spider.crawler = crawler
crawler.signals.connect(
feed_exporter_signal_handler,
signal=signals.feed_exporter_closed,
)
crawler.signals.connect(
feed_slot_signal_handler, signal=signals.feed_slot_closed
)
feed_exporter.open_spider(spider)
for item in self.items:
feed_exporter.item_scraped(item, spider)
defer.ensureDeferred(feed_exporter.close_spider(spider))
def test_feed_exporter_signals_sent(self):
self.feed_exporter_closed_received = False
self.feed_slot_closed_received = False
self.run_signaled_feed_exporter(
self.feed_exporter_closed_signal_handler,
self.feed_slot_closed_signal_handler,
)
assert self.feed_slot_closed_received
assert self.feed_exporter_closed_received
def test_feed_exporter_signals_sent_deferred(self):
self.feed_exporter_closed_received = False
self.feed_slot_closed_received = False
self.run_signaled_feed_exporter(
self.feed_exporter_closed_signal_handler_deferred,
self.feed_slot_closed_signal_handler_deferred,
)
assert self.feed_slot_closed_received
assert self.feed_exporter_closed_received
| TestFeedExporterSignals |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 10689,
"end": 10842
} | class ____(_NumberBoundError):
code = 'number.not_ge'
msg_template = 'ensure this value is greater than or equal to {limit_value}'
| NumberNotGeError |
python | kamyu104__LeetCode-Solutions | Python/make-the-prefix-sum-non-negative.py | {
"start": 75,
"end": 481
} | class ____(object):
def makePrefSumNonNegative(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = prefix = 0
min_heap = []
for x in nums:
heapq.heappush(min_heap, x)
prefix += x
if prefix < 0:
prefix -= heapq.heappop(min_heap)
result += 1
return result
| Solution |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py | {
"start": 8075,
"end": 9268
} | class ____(RpcAgentTestFixture):
@dist_init(setup_rpc=False)
def test_rl_rpc(self):
if self.rank == 0:
# Rank 0 is the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
agent = Agent(self.world_size)
run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1)))
# Ensure training was run. We don't really care about whether the task was learned,
# since the purpose of the test is to check the API calls.
self.assertGreater(agent.running_reward, 0.0)
else:
# Other ranks are observers that passively wait for instructions from the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
| ReinforcementLearningRpcTest |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_datetime64.py | {
"start": 64300,
"end": 73796
} | class ____:
def test_empty_series_add_sub(self, box_with_array):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
a = box_with_array(a)
b = box_with_array(b)
tm.assert_equal(a, a + b)
tm.assert_equal(a, a - b)
tm.assert_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timetimedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timetimedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self, unit):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts], dtype=f"M8[{unit}]")
result = ser - dt
# the expected unit is the max of `unit` and the unit imputed to `dt`,
# which is "us"
exp_unit = tm.get_finest_unit(unit, "us")
assert result.dtype == f"timedelta64[{exp_unit}]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
@pytest.mark.parametrize(
"left, right, op_fail",
[
[
[Timestamp("20111230"), Timestamp("20120101"), NaT],
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")],
["__sub__", "__rsub__"],
],
[
[Timestamp("20111230"), Timestamp("20120101"), NaT],
[timedelta(minutes=5, seconds=3), timedelta(minutes=5, seconds=3), NaT],
["__add__", "__radd__", "__sub__"],
],
[
[
Timestamp("20111230", tz="US/Eastern"),
Timestamp("20111230", tz="US/Eastern"),
NaT,
],
[timedelta(minutes=5, seconds=3), NaT, timedelta(minutes=5, seconds=3)],
["__add__", "__radd__", "__sub__"],
],
],
)
def test_operators_datetimelike_invalid(
self, left, right, op_fail, all_arithmetic_operators
):
# these are all TypeError ops
op_str = all_arithmetic_operators
arg1 = Series(left)
arg2 = Series(right)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(arg1, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
if op_str not in op_fail:
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(arg2)
else:
# Smoke test
op(arg2)
def test_sub_single_tz(self, unit):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")]).dt.as_unit(unit)
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")]).dt.as_unit(unit)
result = s1 - s2
expected = Series([Timedelta("2days")]).dt.as_unit(unit)
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")]).dt.as_unit(unit)
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific", unit="ns")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self, unit):
# see GH#14088
ser = Series([datetime(2016, 8, 23, 12, tzinfo=timezone.utc), NaT]).dt.as_unit(
unit
)
dt = datetime(2016, 8, 22, 12, tzinfo=timezone.utc)
# The datetime object has "us" so we upcast lower units
exp_unit = tm.get_finest_unit(unit, "us")
exp = Series([Timedelta("1 days"), NaT]).dt.as_unit(exp_unit)
result = ser - dt
tm.assert_series_equal(result, exp)
result2 = ser - Timestamp(dt)
tm.assert_series_equal(result2, exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self, unit):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")]).dt.as_unit(unit)
nat_series_dtype_timestamp = Series([NaT, NaT], dtype=f"datetime64[{unit}]")
single_nat_dtype_datetime = Series([NaT], dtype=f"datetime64[{unit}]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="h"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot subtract DatetimeArray from"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
| TestTimestampSeriesArithmetic |
python | scipy__scipy | scipy/sparse/linalg/_interface.py | {
"start": 26738,
"end": 27118
} | class ____(LinearOperator):
def __init__(self, A):
super().__init__(A.dtype, A.shape)
self.A = A
self.__adj = None
self.args = (A,)
def _matmat(self, X):
return self.A.dot(X)
def _adjoint(self):
if self.__adj is None:
self.__adj = _AdjointMatrixOperator(self.A)
return self.__adj
| MatrixLinearOperator |
python | scipy__scipy | benchmarks/benchmarks/optimize_milp.py | {
"start": 1764,
"end": 2630
} | class ____(Benchmark):
# TODO: look at 5,6 - timing out and disabled in Apr'24 (5) and Aug'23 (6)
# see gh-19389 for details
params = [[3, 4]]
param_names = ['size']
def setup(self, n):
A_eq, b_eq, self.c, self.numbers, self.M = magic_square(n)
self.constraints = (A_eq, b_eq, b_eq)
def time_magic_square(self, n):
res = milp(c=self.c*0, constraints=self.constraints,
bounds=(0, 1), integrality=True)
assert res.status == 0
x = np.round(res.x)
s = (self.numbers.flatten() * x).reshape(n**2, n, n)
square = np.sum(s, axis=0)
assert_allclose(square.sum(axis=0), self.M)
assert_allclose(square.sum(axis=1), self.M)
assert_allclose(np.diag(square).sum(), self.M)
assert_allclose(np.diag(square[:, ::-1]).sum(), self.M)
| MilpMagicSquare |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/git_url_svn_top_level/package.py | {
"start": 217,
"end": 557
} | class ____(Package):
"""Mock package that uses git for fetching."""
homepage = "http://www.git-fetch-example.com"
# can't have two VCS fetchers.
url = "https://example.com/some/tarball-1.0.tar.gz"
git = "https://example.com/some/git/repo"
svn = "https://example.com/some/svn/repo"
version("2.0")
| GitUrlSvnTopLevel |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_tz_convert.py | {
"start": 175,
"end": 4984
} | class ____:
def test_tz_convert(self, frame_or_series):
rng = date_range(
"1/1/2011", periods=200, freq="D", tz=zoneinfo.ZoneInfo("US/Eastern")
)
obj = DataFrame({"a": 1}, index=rng)
obj = tm.get_obj(obj, frame_or_series)
berlin = zoneinfo.ZoneInfo("Europe/Berlin")
result = obj.tz_convert(berlin)
expected = DataFrame({"a": 1}, rng.tz_convert(berlin))
expected = tm.get_obj(expected, frame_or_series)
assert result.index.tz.key == "Europe/Berlin"
tm.assert_equal(result, expected)
def test_tz_convert_axis1(self):
rng = date_range(
"1/1/2011", periods=200, freq="D", tz=zoneinfo.ZoneInfo("US/Eastern")
)
obj = DataFrame({"a": 1}, index=rng)
obj = obj.T
berlin = zoneinfo.ZoneInfo("Europe/Berlin")
result = obj.tz_convert(berlin, axis=1)
assert result.columns.tz.key == "Europe/Berlin"
expected = DataFrame({"a": 1}, rng.tz_convert(berlin))
tm.assert_equal(result, expected.T)
def test_tz_convert_naive(self, frame_or_series):
# can't convert tz-naive
rng = date_range("1/1/2011", periods=200, freq="D")
ts = Series(1, index=rng)
ts = frame_or_series(ts)
with pytest.raises(TypeError, match="Cannot convert tz-naive"):
ts.tz_convert("US/Eastern")
@pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
def test_tz_convert_and_localize(self, fn):
l0 = date_range("20140701", periods=5, freq="D")
l1 = date_range("20140701", periods=5, freq="D")
int_idx = Index(range(5))
if fn == "tz_convert":
l0 = l0.tz_localize("UTC")
l1 = l1.tz_localize("UTC")
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)("US/Pacific")
l1_expected = getattr(idx, fn)("US/Pacific")
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)("US/Pacific")
tm.assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
# freq is not preserved in MultiIndex construction
l1_expected = l1_expected._with_freq(None)
l0_expected = l0_expected._with_freq(None)
l1 = l1._with_freq(None)
l0 = l0._with_freq(None)
df3 = getattr(df2, fn)("US/Pacific", level=0)
assert not df3.index.levels[0].equals(l0)
tm.assert_index_equal(df3.index.levels[0], l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)("US/Pacific", level=1)
tm.assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
getattr(df4, fn)("US/Pacific", level=1)
tm.assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
@pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
def test_tz_convert_and_localize_bad_input(self, fn):
int_idx = Index(range(5))
l0 = date_range("20140701", periods=5, freq="D")
# Not DatetimeIndex / PeriodIndex
df = DataFrame(index=int_idx)
with pytest.raises(TypeError, match="DatetimeIndex"):
getattr(df, fn)("US/Pacific")
# Not DatetimeIndex / PeriodIndex
df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
with pytest.raises(TypeError, match="DatetimeIndex"):
getattr(df, fn)("US/Pacific", level=0)
# Invalid level
df = DataFrame(index=l0)
with pytest.raises(ValueError, match="not valid"):
getattr(df, fn)("US/Pacific", level=1)
def test_tz_convert_copy_inplace_mutate(self, frame_or_series):
# GH#6326
obj = frame_or_series(
np.arange(0, 5),
index=date_range("20131027", periods=5, freq="h", tz="Europe/Berlin"),
)
orig = obj.copy()
result = obj.tz_convert("UTC")
expected = frame_or_series(np.arange(0, 5), index=obj.index.tz_convert("UTC"))
tm.assert_equal(result, expected)
tm.assert_equal(obj, orig)
assert result.index is not obj.index
assert result is not obj
| TestTZConvert |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_types.py | {
"start": 1091,
"end": 15246
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mysql.dialect()
@testing.combinations(
# column type, args, kwargs, expected ddl
# e.g. Column(Integer(10, unsigned=True)) ==
# 'INTEGER(10) UNSIGNED'
(mysql.MSNumeric, [], {}, "NUMERIC"),
(mysql.MSNumeric, [None], {}, "NUMERIC"),
(mysql.MSNumeric, [12], {}, "NUMERIC(12)"),
(
mysql.MSNumeric,
[12, 4],
{"unsigned": True},
"NUMERIC(12, 4) UNSIGNED",
),
(
mysql.MSNumeric,
[12, 4],
{"zerofill": True},
"NUMERIC(12, 4) ZEROFILL",
),
(
mysql.MSNumeric,
[12, 4],
{"zerofill": True, "unsigned": True},
"NUMERIC(12, 4) UNSIGNED ZEROFILL",
),
(mysql.MSDecimal, [], {}, "DECIMAL"),
(mysql.MSDecimal, [None], {}, "DECIMAL"),
(mysql.MSDecimal, [12], {}, "DECIMAL(12)"),
(mysql.MSDecimal, [12, None], {}, "DECIMAL(12)"),
(
mysql.MSDecimal,
[12, 4],
{"unsigned": True},
"DECIMAL(12, 4) UNSIGNED",
),
(
mysql.MSDecimal,
[12, 4],
{"zerofill": True},
"DECIMAL(12, 4) ZEROFILL",
),
(
mysql.MSDecimal,
[12, 4],
{"zerofill": True, "unsigned": True},
"DECIMAL(12, 4) UNSIGNED ZEROFILL",
),
(mysql.MSDouble, [None, None], {}, "DOUBLE"),
(
mysql.MSDouble,
[12, 4],
{"unsigned": True},
"DOUBLE(12, 4) UNSIGNED",
),
(
mysql.MSDouble,
[12, 4],
{"zerofill": True},
"DOUBLE(12, 4) ZEROFILL",
),
(
mysql.MSDouble,
[12, 4],
{"zerofill": True, "unsigned": True},
"DOUBLE(12, 4) UNSIGNED ZEROFILL",
),
(mysql.MSReal, [None, None], {}, "REAL"),
(mysql.MSReal, [12, 4], {"unsigned": True}, "REAL(12, 4) UNSIGNED"),
(mysql.MSReal, [12, 4], {"zerofill": True}, "REAL(12, 4) ZEROFILL"),
(
mysql.MSReal,
[12, 4],
{"zerofill": True, "unsigned": True},
"REAL(12, 4) UNSIGNED ZEROFILL",
),
(mysql.MSFloat, [], {}, "FLOAT"),
(mysql.MSFloat, [None], {}, "FLOAT"),
(mysql.MSFloat, [12], {}, "FLOAT(12)"),
(mysql.MSFloat, [12, 4], {}, "FLOAT(12, 4)"),
(mysql.MSFloat, [12, 4], {"unsigned": True}, "FLOAT(12, 4) UNSIGNED"),
(mysql.MSFloat, [12, 4], {"zerofill": True}, "FLOAT(12, 4) ZEROFILL"),
(
mysql.MSFloat,
[12, 4],
{"zerofill": True, "unsigned": True},
"FLOAT(12, 4) UNSIGNED ZEROFILL",
),
(mysql.MSInteger, [], {}, "INTEGER"),
(mysql.MSInteger, [4], {}, "INTEGER(4)"),
(mysql.MSInteger, [4], {"unsigned": True}, "INTEGER(4) UNSIGNED"),
(mysql.MSInteger, [4], {"zerofill": True}, "INTEGER(4) ZEROFILL"),
(
mysql.MSInteger,
[4],
{"zerofill": True, "unsigned": True},
"INTEGER(4) UNSIGNED ZEROFILL",
),
(mysql.MSBigInteger, [], {}, "BIGINT"),
(mysql.MSBigInteger, [4], {}, "BIGINT(4)"),
(mysql.MSBigInteger, [4], {"unsigned": True}, "BIGINT(4) UNSIGNED"),
(mysql.MSBigInteger, [4], {"zerofill": True}, "BIGINT(4) ZEROFILL"),
(
mysql.MSBigInteger,
[4],
{"zerofill": True, "unsigned": True},
"BIGINT(4) UNSIGNED ZEROFILL",
),
(mysql.MSMediumInteger, [], {}, "MEDIUMINT"),
(mysql.MSMediumInteger, [4], {}, "MEDIUMINT(4)"),
(
mysql.MSMediumInteger,
[4],
{"unsigned": True},
"MEDIUMINT(4) UNSIGNED",
),
(
mysql.MSMediumInteger,
[4],
{"zerofill": True},
"MEDIUMINT(4) ZEROFILL",
),
(
mysql.MSMediumInteger,
[4],
{"zerofill": True, "unsigned": True},
"MEDIUMINT(4) UNSIGNED ZEROFILL",
),
(mysql.MSTinyInteger, [], {}, "TINYINT"),
(mysql.MSTinyInteger, [1], {}, "TINYINT(1)"),
(mysql.MSTinyInteger, [1], {"unsigned": True}, "TINYINT(1) UNSIGNED"),
(mysql.MSTinyInteger, [1], {"zerofill": True}, "TINYINT(1) ZEROFILL"),
(
mysql.MSTinyInteger,
[1],
{"zerofill": True, "unsigned": True},
"TINYINT(1) UNSIGNED ZEROFILL",
),
(mysql.MSSmallInteger, [], {}, "SMALLINT"),
(mysql.MSSmallInteger, [4], {}, "SMALLINT(4)"),
(
mysql.MSSmallInteger,
[4],
{"unsigned": True},
"SMALLINT(4) UNSIGNED",
),
(
mysql.MSSmallInteger,
[4],
{"zerofill": True},
"SMALLINT(4) ZEROFILL",
),
(
mysql.MSSmallInteger,
[4],
{"zerofill": True, "unsigned": True},
"SMALLINT(4) UNSIGNED ZEROFILL",
),
)
def test_numeric(self, type_, args, kw, res):
"Exercise type specification and options for numeric types."
type_inst = type_(*args, **kw)
self.assert_compile(type_inst, res)
# test that repr() copies out all arguments
self.assert_compile(eval("mysql.%r" % type_inst), res)
@testing.combinations(
(mysql.MSChar, [1], {}, "CHAR(1)"),
(mysql.NCHAR, [1], {}, "NATIONAL CHAR(1)"),
(mysql.MSChar, [1], {"binary": True}, "CHAR(1) BINARY"),
(mysql.MSChar, [1], {"ascii": True}, "CHAR(1) ASCII"),
(mysql.MSChar, [1], {"unicode": True}, "CHAR(1) UNICODE"),
(
mysql.MSChar,
[1],
{"ascii": True, "binary": True},
"CHAR(1) ASCII BINARY",
),
(
mysql.MSChar,
[1],
{"unicode": True, "binary": True},
"CHAR(1) UNICODE BINARY",
),
(mysql.MSChar, [1], {"charset": "utf8"}, "CHAR(1) CHARACTER SET utf8"),
(
mysql.MSChar,
[1],
{"charset": "utf8", "binary": True},
"CHAR(1) CHARACTER SET utf8 BINARY",
),
(
mysql.MSChar,
[1],
{"charset": "utf8", "unicode": True},
"CHAR(1) CHARACTER SET utf8",
),
(
mysql.MSChar,
[1],
{"charset": "utf8", "ascii": True},
"CHAR(1) CHARACTER SET utf8",
),
(
mysql.MSChar,
[1],
{"collation": "utf8_bin"},
"CHAR(1) COLLATE utf8_bin",
),
(
mysql.MSChar,
[1],
{"charset": "utf8", "collation": "utf8_bin"},
"CHAR(1) CHARACTER SET utf8 COLLATE utf8_bin",
),
(
mysql.MSChar,
[1],
{"charset": "utf8", "binary": True},
"CHAR(1) CHARACTER SET utf8 BINARY",
),
(
mysql.MSChar,
[1],
{"charset": "utf8", "collation": "utf8_bin", "binary": True},
"CHAR(1) CHARACTER SET utf8 COLLATE utf8_bin",
),
(mysql.MSChar, [1], {"national": True}, "NATIONAL CHAR(1)"),
(
mysql.MSChar,
[1],
{"national": True, "charset": "utf8"},
"NATIONAL CHAR(1)",
),
(
mysql.MSChar,
[1],
{"national": True, "charset": "utf8", "binary": True},
"NATIONAL CHAR(1) BINARY",
),
(
mysql.MSChar,
[1],
{"national": True, "binary": True, "unicode": True},
"NATIONAL CHAR(1) BINARY",
),
(
mysql.MSChar,
[1],
{"national": True, "collation": "utf8_bin"},
"NATIONAL CHAR(1) COLLATE utf8_bin",
),
(
mysql.MSString,
[1],
{"charset": "utf8", "collation": "utf8_bin"},
"VARCHAR(1) CHARACTER SET utf8 COLLATE utf8_bin",
),
(
mysql.MSString,
[1],
{"national": True, "collation": "utf8_bin"},
"NATIONAL VARCHAR(1) COLLATE utf8_bin",
),
(
mysql.MSTinyText,
[],
{"charset": "utf8", "collation": "utf8_bin"},
"TINYTEXT CHARACTER SET utf8 COLLATE utf8_bin",
),
(
mysql.MSMediumText,
[],
{"charset": "utf8", "binary": True},
"MEDIUMTEXT CHARACTER SET utf8 BINARY",
),
(mysql.MSLongText, [], {"ascii": True}, "LONGTEXT ASCII"),
(
mysql.ENUM,
["foo", "bar"],
{"unicode": True},
"""ENUM('foo','bar') UNICODE""",
),
(String, [20], {"collation": "utf8"}, "VARCHAR(20) COLLATE utf8"),
)
@testing.exclude("mysql", "<", (4, 1, 1), "no charset support")
def test_charset(self, type_, args, kw, res):
"""Exercise CHARACTER SET and COLLATE-ish options on string types."""
type_inst = type_(*args, **kw)
self.assert_compile(type_inst, res)
@testing.combinations(
(mysql.MSBit(), "BIT"),
(mysql.MSBit(1), "BIT(1)"),
(mysql.MSBit(63), "BIT(63)"),
)
def test_bit_50(self, type_, expected):
"""Exercise BIT types on 5.0+ (not valid for all engine types)"""
self.assert_compile(type_, expected)
@testing.combinations(
(BOOLEAN(), "BOOL"),
(Boolean(), "BOOL"),
(mysql.TINYINT(1), "TINYINT(1)"),
(mysql.TINYINT(1, unsigned=True), "TINYINT(1) UNSIGNED"),
)
def test_boolean_compile(self, type_, expected):
self.assert_compile(type_, expected)
def test_timestamp_fsp(self):
self.assert_compile(mysql.TIMESTAMP(fsp=5), "TIMESTAMP(5)")
@testing.combinations(
([TIMESTAMP], {}, "TIMESTAMP NULL"),
([mysql.MSTimeStamp], {}, "TIMESTAMP NULL"),
(
[
mysql.MSTimeStamp(),
DefaultClause(sql.text("CURRENT_TIMESTAMP")),
],
{},
"TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP",
),
(
[mysql.MSTimeStamp, DefaultClause(sql.text("CURRENT_TIMESTAMP"))],
{"nullable": False},
"TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP",
),
(
[
mysql.MSTimeStamp,
DefaultClause(sql.text("'1999-09-09 09:09:09'")),
],
{"nullable": False},
"TIMESTAMP NOT NULL DEFAULT '1999-09-09 09:09:09'",
),
(
[
mysql.MSTimeStamp(),
DefaultClause(sql.text("'1999-09-09 09:09:09'")),
],
{},
"TIMESTAMP NULL DEFAULT '1999-09-09 09:09:09'",
),
(
[
mysql.MSTimeStamp(),
DefaultClause(
sql.text(
"'1999-09-09 09:09:09' ON UPDATE CURRENT_TIMESTAMP"
)
),
],
{},
"TIMESTAMP NULL DEFAULT '1999-09-09 09:09:09' "
"ON UPDATE CURRENT_TIMESTAMP",
),
(
[
mysql.MSTimeStamp,
DefaultClause(
sql.text(
"'1999-09-09 09:09:09' ON UPDATE CURRENT_TIMESTAMP"
)
),
],
{"nullable": False},
"TIMESTAMP NOT NULL DEFAULT '1999-09-09 09:09:09' "
"ON UPDATE CURRENT_TIMESTAMP",
),
(
[
mysql.MSTimeStamp(),
DefaultClause(
sql.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
),
],
{},
"TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP "
"ON UPDATE CURRENT_TIMESTAMP",
),
(
[
mysql.MSTimeStamp,
DefaultClause(
sql.text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
),
],
{"nullable": False},
"TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP "
"ON UPDATE CURRENT_TIMESTAMP",
),
)
def test_timestamp_defaults(self, spec, kw, expected):
"""Exercise funky TIMESTAMP default syntax when used in columns."""
c = Column("t", *spec, **kw)
Table("t", MetaData(), c)
self.assert_compile(schema.CreateColumn(c), "t %s" % expected)
def test_datetime_generic(self):
self.assert_compile(mysql.DATETIME(), "DATETIME")
def test_datetime_fsp(self):
self.assert_compile(mysql.DATETIME(fsp=4), "DATETIME(4)")
def test_time_generic(self):
"""Exercise TIME."""
self.assert_compile(mysql.TIME(), "TIME")
def test_time_fsp(self):
self.assert_compile(mysql.TIME(fsp=5), "TIME(5)")
def test_time_result_processor(self):
eq_(
mysql.TIME().result_processor(None, None)(
datetime.timedelta(seconds=35, minutes=517, microseconds=450)
),
datetime.time(8, 37, 35, 450),
)
@testing.combinations(
("sa", sqltypes.Float(), "FLOAT"),
("sa", sqltypes.Double(), "DOUBLE"),
("sa", sqltypes.FLOAT(), "FLOAT"),
("sa", sqltypes.REAL(), "REAL"),
("sa", sqltypes.DOUBLE(), "DOUBLE"),
("sa", sqltypes.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("mysql", mysql.FLOAT(), "FLOAT"),
("mysql", mysql.DOUBLE(), "DOUBLE"),
("mysql", mysql.REAL(), "REAL"),
id_="ira",
)
def test_float_type_compile(self, type_, sql_text):
self.assert_compile(type_, sql_text)
| TypeCompileTest |
python | huggingface__transformers | src/transformers/utils/quantization_config.py | {
"start": 6800,
"end": 9718
} | class ____(QuantizationConfigMixin):
"""This is a wrapper class about all possible attributes and features that you can play with a model that has been
loaded AutoRound quantization.
Args:
bits (`int`, *optional*, defaults to 4):
The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
group_size (`int`, *optional*, defaults to 128): Group-size value
sym (`bool`, *optional*, defaults to `True`): Symmetric quantization or not
backend (`str`, *optional*, defaults to `"auto"`): The kernel to use, e.g., ipex,marlin, exllamav2, triton, etc. Ref. https://github.com/intel/auto-round?tab=readme-ov-file#specify-backend
"""
def __init__(
self,
bits: int = 4,
group_size: int = 128,
sym: bool = True,
backend: str = "auto",
**kwargs,
):
self.bits = bits
self.group_size = group_size
self.sym = sym
self.backend = backend
self.packing_format = "auto_round:gptq"
if kwargs is not None:
for key, value in kwargs.items():
setattr(self, key, value)
self.quant_method = QuantizationMethod.AUTOROUND
self.post_init()
def post_init(self):
r"""Safety checker that arguments are correct."""
if self.bits not in [2, 3, 4, 8]:
raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}")
if self.group_size != -1 and self.group_size <= 0:
raise ValueError("group_size must be greater than 0 or equal to -1")
def get_loading_attributes(self):
loading_attributes_dict = {"backend": self.backend}
return loading_attributes_dict
def to_dict(self):
config_dict = super().to_dict()
return config_dict
@classmethod
def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):
quant_method = config_dict["quant_method"]
if "auto-round" not in quant_method and "gptq" not in quant_method and "awq" not in quant_method:
raise NotImplementedError(
"Failed to convert to auto_round format. Only `gptqv1`, `awq`, and `auto-round` formats are supported."
)
if "gptq" in quant_method and "meta" in config_dict:
raise NotImplementedError("Failed to convert gptq format to auto_round format. Only supports `gptqv1`")
if "awq" in quant_method and config_dict.get("version", "gemm") != "gemm":
raise NotImplementedError(
"Failed to convert awq format to auto_round format. Only supports awq format with gemm version"
)
if "auto-round" not in quant_method:
config_dict["packing_format"] = f"auto_round:{quant_method}"
return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
@dataclass
| AutoRoundConfig |
python | numba__numba | numba/core/registry.py | {
"start": 355,
"end": 1167
} | class ____(TargetDescriptor):
options = cpu.CPUTargetOptions
@cached_property
def _toplevel_target_context(self):
# Lazily-initialized top-level target context, for all threads
return cpu.CPUContext(self.typing_context, self._target_name)
@cached_property
def _toplevel_typing_context(self):
# Lazily-initialized top-level typing context, for all threads
return typing.Context()
@property
def target_context(self):
"""
The target context for CPU targets.
"""
return self._toplevel_target_context
@property
def typing_context(self):
"""
The typing context for CPU targets.
"""
return self._toplevel_typing_context
# The global CPU target
cpu_target = CPUTarget('cpu')
| CPUTarget |
python | sphinx-doc__sphinx | sphinx/domains/c/__init__.py | {
"start": 23885,
"end": 24818
} | class ____(XRefRole):
def process_link(
self,
env: BuildEnvironment,
refnode: Element,
has_explicit_title: bool,
title: str,
target: str,
) -> tuple[str, str]:
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
# major hax: replace anon names via simple string manipulation.
# Can this actually fail?
title = anon_identifier_re.sub('[anonymous]', str(title))
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1 :]
return title, target
| CXRefRole |
python | django__django | tests/admin_views/models.py | {
"start": 24269,
"end": 24421
} | class ____(models.Model):
def __str__(self):
return "PK=%d" % self.pk
pk_gt_1 = _Manager()
objects = models.Manager()
| FilteredManager |
python | pytest-dev__pytest | src/_pytest/python.py | {
"start": 12491,
"end": 21051
} | class ____(PyobjMixin, nodes.Collector, abc.ABC):
def funcnamefilter(self, name: str) -> bool:
return self._matches_prefix_or_glob_option("python_functions", name)
def isnosetest(self, obj: object) -> bool:
"""Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator.
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
# function) as test classes.
return safe_getattr(obj, "__test__", False) is True
def classnamefilter(self, name: str) -> bool:
return self._matches_prefix_or_glob_option("python_classes", name)
def istestfunction(self, obj: object, name: str) -> bool:
if self.funcnamefilter(name) or self.isnosetest(obj):
if isinstance(obj, staticmethod | classmethod):
# staticmethods and classmethods need to be unwrapped.
obj = safe_getattr(obj, "__func__", False)
return callable(obj) and fixtures.getfixturemarker(obj) is None
else:
return False
def istestclass(self, obj: object, name: str) -> bool:
if not (self.classnamefilter(name) or self.isnosetest(obj)):
return False
if inspect.isabstract(obj):
return False
return True
def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool:
"""Check if the given name matches the prefix or glob-pattern defined
in configuration."""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# Check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call.
elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
name, option
):
return True
return False
def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
if not getattr(self.obj, "__test__", True):
return []
# Avoid random getattrs and peek in the __dict__ instead.
dicts = [getattr(self.obj, "__dict__", {})]
if isinstance(self.obj, type):
for basecls in self.obj.__mro__:
dicts.append(basecls.__dict__)
# In each class, nodes should be definition ordered.
# __dict__ is definition ordered.
seen: set[str] = set()
dict_values: list[list[nodes.Item | nodes.Collector]] = []
collect_imported_tests = self.session.config.getini("collect_imported_tests")
ihook = self.ihook
for dic in dicts:
values: list[nodes.Item | nodes.Collector] = []
# Note: seems like the dict can change during iteration -
# be careful not to remove the list() without consideration.
for name, obj in list(dic.items()):
if name in IGNORED_ATTRIBUTES:
continue
if name in seen:
continue
seen.add(name)
if not collect_imported_tests and isinstance(self, Module):
# Do not collect functions and classes from other modules.
if inspect.isfunction(obj) or inspect.isclass(obj):
if obj.__module__ != self._getobj().__name__:
continue
res = ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj
)
if res is None:
continue
elif isinstance(res, list):
values.extend(res)
else:
values.append(res)
dict_values.append(values)
# Between classes in the class hierarchy, reverse-MRO order -- nodes
# inherited from base classes should come before subclasses.
result = []
for values in reversed(dict_values):
result.extend(values)
return result
def _genfunctions(self, name: str, funcobj) -> Iterator[Function]:
modulecol = self.getparent(Module)
assert modulecol is not None
module = modulecol.obj
clscol = self.getparent(Class)
cls = (clscol and clscol.obj) or None
definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
fixtureinfo = definition._fixtureinfo
# pytest_generate_tests impls call metafunc.parametrize() which fills
# metafunc._calls, the outcome of the hook.
metafunc = Metafunc(
definition=definition,
fixtureinfo=fixtureinfo,
config=self.config,
cls=cls,
module=module,
_ispytest=True,
)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if cls is not None and hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
if not metafunc._calls:
yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
else:
metafunc._recompute_direct_params_indices()
# Direct parametrizations taking place in module/class-specific
# `metafunc.parametrize` calls may have shadowed some fixtures, so make sure
# we update what the function really needs a.k.a its fixture closure. Note that
# direct parametrizations using `@pytest.mark.parametrize` have already been considered
# into making the closure using `ignore_args` arg to `getfixtureclosure`.
fixtureinfo.prune_dependency_tree()
for callspec in metafunc._calls:
subname = f"{name}[{callspec.id}]" if callspec._idlist else name
yield Function.from_parent(
self,
name=subname,
callspec=callspec,
fixtureinfo=fixtureinfo,
keywords={callspec.id: True},
originalname=name,
)
def importtestmodule(
path: Path,
config: Config,
):
# We assume we are only called once per module.
importmode = config.getoption("--import-mode")
try:
mod = import_path(
path,
mode=importmode,
root=config.rootpath,
consider_namespace_packages=config.getini("consider_namespace_packages"),
)
except SyntaxError as e:
raise nodes.Collector.CollectError(
ExceptionInfo.from_current().getrepr(style="short")
) from e
except ImportPathMismatchError as e:
raise nodes.Collector.CollectError(
"import file mismatch:\n"
"imported module {!r} has this __file__ attribute:\n"
" {}\n"
"which is not the same as the test file we want to collect:\n"
" {}\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules".format(*e.args)
) from e
except ImportError as e:
exc_info = ExceptionInfo.from_current()
if config.get_verbosity() < 2:
exc_info.traceback = exc_info.traceback.filter(filter_traceback)
exc_repr = (
exc_info.getrepr(style="short")
if exc_info.traceback
else exc_info.exconly()
)
formatted_tb = str(exc_repr)
raise nodes.Collector.CollectError(
f"ImportError while importing test module '{path}'.\n"
"Hint: make sure your test modules/packages have valid Python names.\n"
"Traceback:\n"
f"{formatted_tb}"
) from e
except skip.Exception as e:
if e.allow_module_level:
raise
raise nodes.Collector.CollectError(
"Using pytest.skip outside of a test will skip the entire module. "
"If that's your intention, pass `allow_module_level=True`. "
"If you want to skip a specific test or an entire class, "
"use the @pytest.mark.skip or @pytest.mark.skipif decorators."
) from e
config.pluginmanager.consider_module(mod)
return mod
| PyCollector |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/timestamp.py | {
"start": 539,
"end": 651
} | class ____(NamedTuple):
timestamp: float # Seconds since the Unix epoch
timezone: str
| TimestampWithTimezone |
python | pypa__pipenv | pipenv/patched/pip/_internal/vcs/versioncontrol.py | {
"start": 2854,
"end": 4288
} | class ____:
"""
Encapsulates a VCS-specific revision to install, along with any VCS
install options.
Args:
vc_class: a VersionControl subclass.
rev: the name of the revision to install.
extra_args: a list of extra options.
"""
vc_class: Type["VersionControl"]
rev: Optional[str] = None
extra_args: CommandArgs = field(default_factory=list)
branch_name: Optional[str] = None
def __repr__(self) -> str:
return f"<RevOptions {self.vc_class.name}: rev={self.rev!r}>"
@property
def arg_rev(self) -> Optional[str]:
if self.rev is None:
return self.vc_class.default_arg_rev
return self.rev
def to_args(self) -> CommandArgs:
"""
Return the VCS-specific command arguments.
"""
args: CommandArgs = []
rev = self.arg_rev
if rev is not None:
args += self.vc_class.get_base_rev_args(rev)
args += self.extra_args
return args
def to_display(self) -> str:
if not self.rev:
return ""
return f" (to revision {self.rev})"
def make_new(self, rev: str) -> "RevOptions":
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
| RevOptions |
python | ansible__ansible | test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py | {
"start": 84,
"end": 680
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
""" handler for file transfer operations """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped'):
return result
module_args = self._task.args.copy()
result.update(
self._execute_module(
module_name='me.mycoll2.module1',
module_args=module_args,
task_vars=task_vars,
)
)
return result
| ActionModule |
python | PyCQA__pylint | tests/regrtest_data/descriptor_crash.py | {
"start": 45,
"end": 358
} | class ____(object):
_urlOpen = staticmethod(urllib.urlopen)
def getPage(self, url):
handle = self._urlOpen(url)
data = handle.read()
handle.close()
return data
#_getPage
#Page
if __name__ == "__main__":
import sys
p = Page()
print p.getPage(sys.argv[1])
| Page |
python | lazyprogrammer__machine_learning_examples | unsupervised_class3/dcgan_tf.py | {
"start": 719,
"end": 2129
} | class ____:
def __init__(self, name, mi, mo, apply_batch_norm, filtersz=5, stride=2, f=tf.nn.relu):
# mi = input feature map size
# mo = output feature map size
# self.W = tf.Variable(0.02*tf.random_normal(shape=(filtersz, filtersz, mi, mo)))
# self.b = tf.Variable(np.zeros(mo, dtype=np.float32))
self.W = tf.get_variable(
"W_%s" % name,
shape=(filtersz, filtersz, mi, mo),
# initializer=tf.contrib.layers.xavier_initializer(),
initializer=tf.truncated_normal_initializer(stddev=0.02),
)
self.b = tf.get_variable(
"b_%s" % name,
shape=(mo,),
initializer=tf.zeros_initializer(),
)
self.name = name
self.f = f
self.stride = stride
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
def forward(self, X, reuse, is_training):
# print("**************** reuse:", reuse)
conv_out = tf.nn.conv2d(
X,
self.W,
strides=[1, self.stride, self.stride, 1],
padding='SAME'
)
conv_out = tf.nn.bias_add(conv_out, self.b)
# apply batch normalization
if self.apply_batch_norm:
conv_out = tf.contrib.layers.batch_norm(
conv_out,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
reuse=reuse,
scope=self.name,
)
return self.f(conv_out)
| ConvLayer |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/conv_test.py | {
"start": 168,
"end": 1299
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, IC, OC, kernel, stride, N, L, device):
self.inputs = {
"input": torch.rand(N, IC, L, device=device, requires_grad=self.auto_set())
}
self.conv1d = nn.Conv1d(IC, OC, kernel, stride=stride).to(device=device)
self.set_module_name("Conv1d")
def forward(self, input):
return self.conv1d(input)
def get_memory_traffic_bytes(self):
"""Calculate memory traffic for Conv1d: read(input + weight) + write(output)"""
input_tensor = self.inputs["input"]
# Run forward to get output shape
with torch.no_grad():
output = self.conv1d(input_tensor)
bytes_per_element = input_tensor.element_size()
# Input: N × IC × L
input_elements = input_tensor.numel()
# Weight: OC × IC × kernel
weight_elements = self.conv1d.weight.numel()
# Output: N × OC × L_out
output_elements = output.numel()
total_elements = input_elements + weight_elements + output_elements
return total_elements * bytes_per_element
| Conv1dBenchmark |
python | pytorch__pytorch | torch/autograd/profiler_util.py | {
"start": 32763,
"end": 39132
} | class ____(FormattedTimesMixin):
"""Averaged profiling statistics over multiple FunctionEvent objects.
FunctionEventAvg aggregates statistics from multiple FunctionEvent objects
with the same key (typically same operation name). This is useful for getting
average performance metrics across multiple invocations of the same operation.
This class is typically created by calling :meth:`EventList.key_averages()` on
a profiler's event list.
Attributes:
key (str): Grouping key for the events (typically operation name).
count (int): Total number of events aggregated.
node_id (int): Node identifier for distributed profiling (-1 if not applicable).
is_async (bool): Whether the operations are asynchronous.
is_remote (bool): Whether the operations occurred on a remote node.
use_device (str): Device type being profiled ("cuda", "xpu", etc.).
cpu_time_total (int): Accumulated total CPU time in microseconds.
device_time_total (int): Accumulated total device time in microseconds.
self_cpu_time_total (int): Accumulated self CPU time (excluding children) in microseconds.
self_device_time_total (int): Accumulated self device time (excluding children) in microseconds.
input_shapes (List[List[int]]): Input tensor shapes (requires record_shapes=true).
overload_name (str): Operator overload name (requires _ExperimentalConfig(capture_overload_names=True) set).
stack (List[str]): Python stack trace where the operation was called (requires with_stack=true).
scope (int): at::RecordScope identifier (0=forward, 1=backward, etc.).
cpu_memory_usage (int): Accumulated CPU memory usage in bytes.
device_memory_usage (int): Accumulated device memory usage in bytes.
self_cpu_memory_usage (int): Accumulated self CPU memory usage in bytes.
self_device_memory_usage (int): Accumulated self device memory usage in bytes.
cpu_children (List[FunctionEvent]): CPU child events.
cpu_parent (FunctionEvent): CPU parent event.
device_type (DeviceType): Type of device (CPU, CUDA, XPU, PrivateUse1, etc.).
is_legacy (bool): Whether from legacy profiler.
flops (int): Total floating point operations.
is_user_annotation (bool): Whether this is a user-annotated region.
Properties:
cpu_time (float): Average CPU time per invocation.
device_time (float): Average device time per invocation.
See Also:
- :class:`EventList.key_averages`: Method that creates FunctionEventAvg objects
- :class:`FunctionEvent`: Individual profiling event
- :class:`EventList`: Container for profiling events
"""
def __init__(self) -> None:
self.key: Optional[str] = None
self.count: int = 0
self.node_id: int = 0
self.is_async: bool = False
self.is_remote: bool = False
self.use_device: Optional[str] = None
self.cpu_time_total: int = 0
self.device_time_total: int = 0
self.self_cpu_time_total: int = 0
self.self_device_time_total: int = 0
self.input_shapes: Optional[list[list[int]]] = None
self.overload_name: Optional[str] = None
self.stack: Optional[list] = None
self.scope: Optional[int] = None
self.cpu_memory_usage: int = 0
self.device_memory_usage: int = 0
self.self_cpu_memory_usage: int = 0
self.self_device_memory_usage: int = 0
self.cpu_children: Optional[list[FunctionEvent]] = None
self.cpu_parent: Optional[FunctionEvent] = None
self.device_type: DeviceType = DeviceType.CPU
self.is_legacy: bool = False
self.flops: int = 0
def add(self, other):
if self.key is None:
# First function being recorded as part of FunctionEventAvg, propagate
# fields.
self.key = other.key
self.node_id = other.node_id
self.is_async = other.is_async
self.is_remote = other.is_remote
self.cpu_parent = other.cpu_parent
self.cpu_children = other.cpu_children
self.overload_name = other.overload_name
self.input_shapes = other.input_shapes
self.stack = other.stack
self.scope = other.scope
self.device_type = other.device_type
self.is_legacy = other.is_legacy
self.use_device = other.use_device
self.is_user_annotation = other.is_user_annotation
if not isinstance(other, (FunctionEvent, FunctionEventAvg)):
raise AssertionError(
"Expected other to be a FunctionEvent or FunctionEventAvg"
)
if other.key != self.key:
raise AssertionError(
f"Expected keys to match, but got {other.key} vs {self.key}"
)
self.cpu_time_total += other.cpu_time_total
self.device_time_total += other.device_time_total
self.self_cpu_time_total += other.self_cpu_time_total
self.self_device_time_total += other.self_device_time_total
self.cpu_memory_usage += other.cpu_memory_usage
self.device_memory_usage += other.device_memory_usage
self.self_cpu_memory_usage += other.self_cpu_memory_usage
self.self_device_memory_usage += other.self_device_memory_usage
self.count += other.count
if self.flops is None:
# pyrefly: ignore [bad-assignment]
self.flops = other.flops
elif other.flops is not None:
self.flops += other.flops
return self
def __iadd__(self, other):
return self.add(other)
def __repr__(self):
device_name = "cuda" if not self.use_device else self.use_device
self_device_time = self.self_device_time_total_str
device_time = self.device_time_str
device_memory = self.device_memory_usage
return (
f"<FunctionEventAvg key={self.key} self_cpu_time={self.self_cpu_time_total_str} cpu_time={self.cpu_time_str} "
f" self_{device_name}_time={self_device_time} {device_name}_time={device_time} input_shapes={str(self.input_shapes)} "
f"cpu_memory_usage={self.cpu_memory_usage} {device_name}_memory_usage={device_memory}>"
)
| FunctionEventAvg |
python | spack__spack | lib/spack/spack/test/installer_tui.py | {
"start": 445,
"end": 564
} | class ____:
"""Mock multiprocessing.Connection for testing"""
def fileno(self):
return -1
| MockConnection |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_existing_high_priority_issue_handler.py | {
"start": 495,
"end": 3368
} | class ____(ConditionTestCase):
condition = Condition.EXISTING_HIGH_PRIORITY_ISSUE
payload = {"id": ExistingHighPriorityIssueCondition.id}
def setUp(self) -> None:
super().setUp()
self.event_data = WorkflowEventData(
event=self.group_event,
group=self.group_event.group,
group_state=GroupState(
{
"id": 1,
"is_regression": True,
"is_new": False,
"is_new_group_environment": False,
}
),
has_reappeared=True,
has_escalated=True,
)
self.dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
self.group_event.group.priority = PriorityLevel.HIGH
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison is True
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
dc.comparison = False
dc.save()
dc.comparison = {"time": "asdf"}
with pytest.raises(ValidationError):
dc.save()
dc.comparison = "hello"
with pytest.raises(ValidationError):
dc.save()
def test(self) -> None:
self.assert_passes(self.dc, self.event_data)
def test_group_state_is_new(self) -> None:
assert self.event_data.group_state
self.event_data.group_state["is_new"] = True
self.assert_does_not_pass(self.dc, self.event_data)
def test_is_escalating(self) -> None:
self.event_data = replace(self.event_data, has_reappeared=False, has_escalated=True)
self.assert_passes(self.dc, self.event_data)
self.event_data = replace(self.event_data, has_reappeared=True, has_escalated=True)
self.assert_passes(self.dc, self.event_data)
self.event_data = replace(self.event_data, has_reappeared=False, has_escalated=False)
self.assert_does_not_pass(self.dc, self.event_data)
self.event_data = replace(self.event_data, has_reappeared=True, has_escalated=False)
self.assert_does_not_pass(self.dc, self.event_data)
def test_priority(self) -> None:
self.group_event.group.priority = PriorityLevel.LOW
self.assert_does_not_pass(self.dc, self.event_data)
self.group_event.group.priority = PriorityLevel.MEDIUM
self.assert_does_not_pass(self.dc, self.event_data)
| TestExistingHighPriorityIssueCondition |
python | ansible__ansible | test/units/module_utils/datatag/test_datatag.py | {
"start": 1976,
"end": 3660
} | class ____(t.Protocol):
def copy(self) -> t.Any:
"""Copy this instance."""
message_instances = [
_messages.Event(msg="bla", formatted_source_context="sc"),
_messages.EventChain(msg_reason="a", traceback_reason="b", event=_messages.Event(msg="c")),
_messages.ErrorSummary(event=_messages.Event(msg="bla", formatted_traceback="tb")),
_messages.WarningSummary(event=_messages.Event(msg="bla", formatted_source_context="sc", formatted_traceback="tb")),
_messages.DeprecationSummary(event=_messages.Event(msg="bla", formatted_source_context="sc", formatted_traceback="tb"), version="1.2.3"),
_messages.PluginInfo(resolved_name='a.b.c', type=_messages.PluginType.MODULE),
_messages.PluginType.MODULE,
]
def assert_round_trip(original_value, round_tripped_value, via_copy=False):
assert original_value == round_tripped_value
assert AnsibleTagHelper.tags(original_value) == AnsibleTagHelper.tags(round_tripped_value)
if via_copy and type(original_value) is tuple: # pylint: disable=unidiomatic-typecheck
# copy.copy/copy.deepcopy significantly complicate the rules for reference equality with tuple, skip the following checks for values sourced that way
# tuple impl of __copy__ always returns the same instance, __deepcopy__ always returns the same instance if its contents are immutable
return
# singleton values should rehydrate as the shared singleton instance, all others should be a new instance
if isinstance(original_value, (AnsibleSingletonTagBase, enum.Enum)):
assert original_value is round_tripped_value
else:
assert original_value is not round_tripped_value
| CopyProtocol |
python | doocs__leetcode | lcci/08.04.Power Set/Solution2.py | {
"start": 0,
"end": 308
} | class ____:
def subsets(self, nums: List[int]) -> List[List[int]]:
ans = []
for mask in range(1 << len(nums)):
t = []
for i, v in enumerate(nums):
if (mask >> i) & 1:
t.append(v)
ans.append(t)
return ans
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 57844,
"end": 58091
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
alert_id: str = Field(..., description="The canonical identifier of the SQL alert.")
| SqlTaskAlert |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 224589,
"end": 224692
} | class ____(suite.JSONTest):
__requires__ = ("postgresql_jsonb",)
datatype = JSONB
| JSONBSuiteTest |
python | PyCQA__pylint | tests/functional/m/method_hidden.py | {
"start": 237,
"end": 314
} | class ____:
"""dummy"""
def __init__(self):
self.abcd = 1
| Abcd |
python | walkccc__LeetCode | solutions/2616. Minimize the Maximum Difference of Pairs/2616.py | {
"start": 0,
"end": 573
} | class ____:
def minimizeMax(self, nums: list[int], p: int) -> int:
nums.sort()
def numPairs(maxDiff: int) -> int:
"""
Returns the number of pairs that can be obtained if the difference between
each pair <= `maxDiff`.
"""
pairs = 0
i = 1
while i < len(nums):
# Greedily pair nums[i] with nums[i - 1].
if nums[i] - nums[i - 1] <= maxDiff:
pairs += 1
i += 2
else:
i += 1
return pairs
return bisect.bisect_left(range(nums[-1] - nums[0]), p, key=numPairs)
| Solution |
python | openai__openai-python | src/openai/types/responses/response_input_item.py | {
"start": 8602,
"end": 8825
} | class ____(BaseModel):
path: str
"""Path of the file to delete relative to the workspace root."""
type: Literal["delete_file"]
"""The operation type. Always `delete_file`."""
| ApplyPatchCallOperationDeleteFile |
python | getsentry__sentry | tests/sentry/objectstore/endpoints/test_organization.py | {
"start": 177,
"end": 833
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-objectstore"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def test_feature_flag_disabled(self):
"""Without feature flag, returns 404"""
response = self.get_response(self.organization.slug)
assert response.status_code == 404
@with_feature("organizations:objectstore-endpoint")
def test_feature_flag_enabled(self):
"""With feature flag, endpoint is accessible"""
response = self.get_response(self.organization.slug)
assert response.status_code == 200
| OrganizationObjectstoreEndpointTest |
python | getsentry__sentry | tests/snuba/rules/conditions/test_event_frequency.py | {
"start": 52639,
"end": 52879
} | class ____(
ErrorEventMixin, EventFrequencyPercentConditionTestCase
):
pass
@freeze_time(
(timezone.now() - timedelta(days=2)).replace(hour=12, minute=40, second=0, microsecond=0)
)
| ErrorIssueEventFrequencyPercentConditionTestCase |
python | getsentry__sentry | src/sentry/workflow_engine/processors/data_condition_group.py | {
"start": 6825,
"end": 15061
} | class ____:
logic_result: TriggerResult
condition_results: list[ProcessedDataCondition]
DataConditionGroupResult = tuple[ProcessedDataConditionGroup, list[DataCondition]]
# We use a defined function rather than a lambda below because otherwise
# parameter type becomes Any.
def _group_id_from_condition(condition: DataCondition) -> tuple[int]:
return (condition.condition_group_id,)
@cache_func_for_models(
[(DataCondition, _group_id_from_condition)],
recalculate=False,
)
def get_data_conditions_for_group(data_condition_group_id: int) -> list[DataCondition]:
return list(DataCondition.objects.filter(condition_group_id=data_condition_group_id))
@scopedstats.timer()
def _get_data_conditions_for_group_shim(data_condition_group_id: int) -> list[DataCondition]:
"""
Wrapper for single item use case so we can easily time it.
We can't timer() get_data_conditions_for_group because it's a CachedFunction, and
decorating it would turn it into a regular function and make `.batch()` unusable.
"""
return get_data_conditions_for_group(data_condition_group_id)
@sentry_sdk.trace
def get_slow_conditions_for_groups(
data_condition_group_ids: list[int],
) -> dict[int, list[DataCondition]]:
"""
Takes a list of DataConditionGroup IDs and returns a dict with
the slow conditions associated with each ID.
"""
args_list = [(group_id,) for group_id in data_condition_group_ids]
results = get_data_conditions_for_group.batch(args_list)
return {
group_id: [cond for cond in conditions if is_slow_condition(cond)]
for group_id, conditions in zip(data_condition_group_ids, results)
}
def evaluate_condition_group_results(
condition_results: list[ProcessedDataCondition],
logic_type: DataConditionGroup.Type,
) -> ProcessedDataConditionGroup:
logic_result = TriggerResult.FALSE
group_condition_results: list[ProcessedDataCondition] = []
if logic_type == DataConditionGroup.Type.NONE:
# if we get to this point, no conditions were met
# because we would have short-circuited
logic_result = TriggerResult.none(
condition_result.logic_result for condition_result in condition_results
)
elif logic_type == DataConditionGroup.Type.ANY:
logic_result = TriggerResult.any(
condition_result.logic_result for condition_result in condition_results
)
if logic_result.triggered:
group_condition_results = [
condition_result
for condition_result in condition_results
if condition_result.logic_result.triggered
]
elif logic_type == DataConditionGroup.Type.ALL:
conditions_met = [condition_result.logic_result for condition_result in condition_results]
logic_result = TriggerResult.all(conditions_met)
if logic_result.triggered:
group_condition_results = [
condition_result
for condition_result in condition_results
if condition_result.logic_result.triggered
]
return ProcessedDataConditionGroup(
logic_result=logic_result,
condition_results=group_condition_results,
)
@scopedstats.timer()
def evaluate_data_conditions(
conditions_to_evaluate: list[tuple[DataCondition, T]],
logic_type: DataConditionGroup.Type,
) -> ProcessedDataConditionGroup:
"""
Evaluate a list of conditions. Each condition is a tuple with the value to evaluate the condition against.
Next we apply the logic_type to get the results of the list of conditions.
"""
condition_results: list[ProcessedDataCondition] = []
if len(conditions_to_evaluate) == 0:
# if we don't have any conditions, always return True
return ProcessedDataConditionGroup(logic_result=TriggerResult.TRUE, condition_results=[])
for condition, value in conditions_to_evaluate:
evaluation_result = condition.evaluate_value(value)
cleaned_result: DataConditionResult
if isinstance(evaluation_result, ConditionError):
cleaned_result = None
else:
cleaned_result = evaluation_result
trigger_result = TriggerResult(
triggered=cleaned_result is not None,
error=evaluation_result if isinstance(evaluation_result, ConditionError) else None,
)
if trigger_result.triggered:
# Check for short-circuiting evaluations
if logic_type == DataConditionGroup.Type.ANY_SHORT_CIRCUIT:
condition_result = ProcessedDataCondition(
logic_result=trigger_result,
condition=condition,
result=cleaned_result,
)
return ProcessedDataConditionGroup(
logic_result=trigger_result,
condition_results=[condition_result],
)
if logic_type == DataConditionGroup.Type.NONE:
return ProcessedDataConditionGroup(
logic_result=TriggerResult(triggered=False, error=trigger_result.error),
condition_results=[],
)
result = ProcessedDataCondition(
logic_result=trigger_result,
condition=condition,
result=cleaned_result,
)
condition_results.append(result)
return evaluate_condition_group_results(
condition_results,
logic_type,
)
@scopedstats.timer()
def process_data_condition_group(
group: DataConditionGroup,
value: T,
data_conditions_for_group: list[DataCondition] | None = None,
) -> DataConditionGroupResult:
condition_results: list[ProcessedDataCondition] = []
try:
logic_type = DataConditionGroup.Type(group.logic_type)
except ValueError:
logger.exception(
"Invalid DataConditionGroup.logic_type found in process_data_condition_group",
extra={"logic_type": group.logic_type},
)
trigger_result = TriggerResult(
triggered=False, error=ConditionError(msg="Invalid DataConditionGroup.logic_type")
)
return ProcessedDataConditionGroup(logic_result=trigger_result, condition_results=[]), []
# Check if conditions are already prefetched before using cache
all_conditions: list[DataCondition]
if data_conditions_for_group is not None:
all_conditions = data_conditions_for_group
elif (
hasattr(group, "_prefetched_objects_cache")
and "conditions" in group._prefetched_objects_cache
):
all_conditions = list(group.conditions.all())
else:
all_conditions = _get_data_conditions_for_group_shim(group.id)
split_conds = split_conditions_by_speed(all_conditions)
if not split_conds.fast and split_conds.slow:
# there are only slow conditions to evaluate, do not evaluate an empty list of conditions
# which would evaluate to True
condition_group_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.FALSE,
condition_results=condition_results,
)
return condition_group_result, split_conds.slow
conditions_to_evaluate = [(condition, value) for condition in split_conds.fast]
processed_condition_group = evaluate_data_conditions(conditions_to_evaluate, logic_type)
logic_result = processed_condition_group.logic_result
# Check to see if we should return any remaining conditions based on the results
is_short_circuit_all = not logic_result.triggered and logic_type == DataConditionGroup.Type.ALL
is_short_circuit_any = logic_result.triggered and logic_type in (
DataConditionGroup.Type.ANY,
DataConditionGroup.Type.ANY_SHORT_CIRCUIT,
)
if is_short_circuit_all or is_short_circuit_any:
# if we have a logic type of all and a False result,
# or if we have a logic type of any and a True result, then
# we can short-circuit any remaining conditions since we have a completed logic result
return processed_condition_group, []
return processed_condition_group, split_conds.slow
| ProcessedDataConditionGroup |
python | fastai__fastai | fastai/vision/augment.py | {
"start": 28819,
"end": 31157
} | class ____(Flip):
"Flip the batch every other call"
def __init__(self,
size:int|tuple=None, # Output size, duplicated if one value is specified
mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
pad_mode=PadMode.Reflection, # A `PadMode`
align_corners=True, # PyTorch `F.grid_sample` align_corners
**kwargs
):
super().__init__(p=1., draw=DeterministicDraw([0,1]), mode=mode, pad_mode=pad_mode, align_corners=align_corners, **kwargs)
# %% ../../nbs/09_vision.augment.ipynb 153
def dihedral_mat(
x:Tensor, # Input `Tensor`
p:float=0.5, # Probability of staying unchanged
draw:int|MutableSequence|Callable=None, # Custom dihedrals instead of random
batch:bool=False # Apply identical dihedral to entire batch
):
"Return a random dihedral matrix"
def _def_draw(x): return torch.randint(0,8, (x.size(0),), device=x.device)
def _def_draw_b(x): return random.randint(0,7) + x.new_zeros((x.size(0),)).long()
idx = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch).long()
xs = tensor([1,-1,1,-1,-1,1,1,-1], device=x.device).gather(0, idx)
ys = tensor([1,1,-1,1,-1,-1,1,-1], device=x.device).gather(0, idx)
m0 = tensor([1,1,1,0,1,0,0,0], device=x.device).gather(0, idx)
m1 = tensor([0,0,0,1,0,1,1,1], device=x.device).gather(0, idx)
return affine_mat(xs*m0, xs*m1, t0(xs),
ys*m1, ys*m0, t0(xs)).float()
# %% ../../nbs/09_vision.augment.ipynb 154
@patch
def dihedral_batch(x: TensorImage|TensorMask|TensorPoint|TensorBBox,
p=0.5, # Probability of applying dihedral
draw:int|MutableSequence|Callable=None, # Custom dihedrals instead of random
size:int|tuple=None, # Output size, duplicated if one value is specified
mode:str='bilinear', # PyTorch `F.grid_sample` interpolation applied to `x`
pad_mode=None, # Padding applied to `x`
batch=False, # Apply identical dihedral to entire batch
align_corners=True # PyTorch `F.grid_sample` align_corners
):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat = _prepare_mat(x, dihedral_mat(x0, p=p, draw=draw, batch=batch))
return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 155
| DeterministicFlip |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_pop.py | {
"start": 117,
"end": 2143
} | class ____:
def test_pop(self, float_frame):
float_frame.columns.name = "baz"
float_frame.pop("A")
assert "A" not in float_frame
float_frame["foo"] = "bar"
float_frame.pop("foo")
assert "foo" not in float_frame
assert float_frame.columns.name == "baz"
# gh-10912: inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"])
b = a.pop("B")
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"])
tm.assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=["X", "Y"], name="B") + 1
tm.assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
assert type(res) == DataFrame
assert len(res) == 2
assert len(df.columns) == 1
assert "b" in df.columns
assert "a" not in df.columns
assert len(df.index) == 2
def test_mixed_depth_pop(self):
arrays = [
["a", "top", "top", "routine1", "routine1", "routine2"],
["", "OD", "OD", "result1", "result2", "result1"],
["", "wx", "wy", "", "", ""],
]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop("a")
expected = df2.pop(("a", "", ""))
tm.assert_series_equal(expected, result, check_names=False)
tm.assert_frame_equal(df1, df2)
assert result.name == "a"
expected = df1["top"]
df1 = df1.drop(["top"], axis=1)
result = df2.pop("top")
tm.assert_frame_equal(expected, result)
tm.assert_frame_equal(df1, df2)
| TestDataFramePop |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/pagination.py | {
"start": 2312,
"end": 2752
} | class ____:
storage_id: int
def __str__(self) -> str:
return self.to_string()
def to_string(self) -> str:
string_serialized = serialize_value(self)
return base64.b64encode(bytes(string_serialized, encoding="utf-8")).decode(
"utf-8"
)
@classmethod
def from_cursor(cls, cursor: str):
return deserialize_value(base64.b64decode(cursor).decode("utf-8"), cls)
| StorageIdCursor |
python | run-llama__llama_index | llama-index-core/tests/agent/utils/test_agent_utils.py | {
"start": 634,
"end": 6454
} | class ____(LLM):
def __init__(self, responses: List[ChatMessage], structured_response: str):
super().__init__()
self._responses = responses
self._structured_response = structured_response
self._response_index = 0
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
async def astream_chat(
self, messages: List[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
response_msg = None
if self._responses:
response_msg = self._responses[self._response_index]
self._response_index = (self._response_index + 1) % len(self._responses)
async def _gen():
if response_msg:
yield ChatResponse(
message=response_msg,
delta=response_msg.content,
raw={"content": response_msg.content},
)
return _gen()
async def astream_chat_with_tools(
self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
response_msg = None
if self._responses:
response_msg = self._responses[self._response_index]
self._response_index = (self._response_index + 1) % len(self._responses)
async def _gen():
if response_msg:
yield ChatResponse(
message=response_msg,
delta=response_msg.content,
raw={"content": response_msg.content},
)
return _gen()
def get_tool_calls_from_response(
self, response: ChatResponse, **kwargs: Any
) -> List[ToolSelection]:
return response.message.additional_kwargs.get("tool_calls", [])
@override
async def astructured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
return output_cls.model_validate_json(self._structured_response)
@override
async def structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
return output_cls.model_validate_json(self._structured_response)
async def achat(self, *args, **kwargs):
pass
def chat(self, *args, **kwargs):
pass
def stream_chat(self, *args, **kwargs):
pass
def complete(self, *args, **kwargs):
pass
async def acomplete(self, *args, **kwargs):
pass
def stream_complete(self, *args, **kwargs):
pass
async def astream_complete(self, *args, **kwargs):
pass
def _prepare_chat_with_tools(self, *args, **kwargs):
return {}
@pytest.fixture
def chat_messages() -> List[ChatMessage]:
return [
ChatMessage(role="user", blocks=[TextBlock(text="hello")]),
ChatMessage(role="assistant", blocks=[TextBlock(text="hello back")]),
ChatMessage(role="user", blocks=[TextBlock(text="how are you?")]),
ChatMessage(role="assistant", blocks=[TextBlock(text="I am good, thank you.")]),
]
@pytest.fixture()
def chat_messages_sys(chat_messages: List[ChatMessage]) -> List[ChatMessage]:
return [
ChatMessage(role="system", content="You are a helpful assistant."),
*chat_messages,
]
@pytest.fixture
def xml_string() -> str:
return "<current_conversation>\n\t<user>\n\t\t<message>hello</message>\n\t</user>\n\t<assistant>\n\t\t<message>hello back</message>\n\t</assistant>\n\t<user>\n\t\t<message>how are you?</message>\n\t</user>\n\t<assistant>\n\t\t<message>I am good, thank you.</message>\n\t</assistant>\n</current_conversation>\n\nGiven the conversation, format the output according to the provided schema."
@pytest.fixture
def xml_string_sys() -> str:
return "<current_conversation>\n\t<system>\n\t\t<message>You are a helpful assistant.</message>\n\t</system>\n\t<user>\n\t\t<message>hello</message>\n\t</user>\n\t<assistant>\n\t\t<message>hello back</message>\n\t</assistant>\n\t<user>\n\t\t<message>how are you?</message>\n\t</user>\n\t<assistant>\n\t\t<message>I am good, thank you.</message>\n\t</assistant>\n</current_conversation>\n\nGiven the conversation, format the output according to the provided schema."
@pytest.fixture
def structured_response() -> str:
return Structure(hello="test", world=1).model_dump_json()
def test_messages_to_xml(chat_messages: List[ChatMessage], xml_string: str) -> None:
msg = messages_to_xml_format(chat_messages)
assert len(msg) == 1
assert isinstance(msg[0], ChatMessage)
s = ""
for block in msg[0].blocks:
s += block.text
assert s == xml_string
def test_messages_to_xml_sys(
chat_messages_sys: List[ChatMessage], xml_string_sys: str
) -> None:
msg = messages_to_xml_format(chat_messages_sys)
assert len(msg) == 2
assert isinstance(msg[0], ChatMessage)
assert msg[0].role == "system"
assert msg[0].content == "You are a helpful assistant."
s = ""
for block in msg[1].blocks:
s += block.text
assert s == xml_string_sys
@pytest.mark.asyncio
async def test_generate_structured_response(
chat_messages: List[ChatMessage], structured_response: str
) -> None:
llm = TestLLM(
responses=[ChatMessage(role="assistant", content="Hello World!")],
structured_response=structured_response,
)
generated_response = await generate_structured_response(
messages=chat_messages, llm=llm, output_cls=Structure
)
assert Structure.model_validate(
generated_response
) == Structure.model_validate_json(structured_response)
| TestLLM |
python | streamlit__streamlit | lib/streamlit/runtime/app_session.py | {
"start": 2754,
"end": 50410
} | class ____:
"""
Contains session data for a single "user" of an active app
(that is, a connected browser tab).
Each AppSession has its own ScriptData, root DeltaGenerator, ScriptRunner,
and widget state.
An AppSession is attached to each thread involved in running its script.
"""
def __init__(
self,
script_data: ScriptData,
uploaded_file_manager: UploadedFileManager,
script_cache: ScriptCache,
message_enqueued_callback: Callable[[], None] | None,
user_info: dict[str, str | bool | None],
session_id_override: str | None = None,
) -> None:
"""Initialize the AppSession.
Parameters
----------
script_data
Object storing parameters related to running a script
uploaded_file_manager
Used to manage files uploaded by users via the Streamlit web client.
script_cache
The app's ScriptCache instance. Stores cached user scripts. ScriptRunner
uses the ScriptCache to avoid having to reload user scripts from disk
on each rerun.
message_enqueued_callback
After enqueuing a message, this callable notification will be invoked.
user_info
A dict that contains information about the current user. For now,
it only contains the user's email address.
{
"email": "example@example.com"
}
Information about the current user is optionally provided when a
websocket connection is initialized via the "X-Streamlit-User" header.
session_id_override
The ID to assign to this session. Setting this can be useful when the
service that a Streamlit Runtime is running in wants to tie the lifecycle of
a Streamlit session to some other session-like object that it manages.
"""
# Each AppSession has a unique string ID.
self.id = session_id_override or str(uuid.uuid4())
self._event_loop = asyncio.get_running_loop()
self._script_data = script_data
self._uploaded_file_mgr = uploaded_file_manager
self._script_cache = script_cache
self._pages_manager = PagesManager(
script_data.main_script_path, self._script_cache
)
# The browser queue contains messages that haven't yet been
# delivered to the browser. Periodically, the server flushes
# this queue and delivers its contents to the browser.
self._browser_queue = ForwardMsgQueue()
self._message_enqueued_callback = message_enqueued_callback
self._state = AppSessionState.APP_NOT_RUNNING
# Need to remember the client state here because when a script reruns
# due to the source code changing we need to pass in the previous client state.
self._client_state = ClientState()
self._local_sources_watcher: LocalSourcesWatcher | None = None
self._stop_config_listener: Callable[[], None] | None = None
self._stop_pages_listener: Callable[[], None] | None = None
if config.get_option("server.fileWatcherType") != "none":
self.register_file_watchers()
self._run_on_save = config.get_option("server.runOnSave")
self._scriptrunner: ScriptRunner | None = None
# This needs to be lazily imported to avoid a dependency cycle.
from streamlit.runtime.state import SessionState
self._session_state = SessionState()
self._user_info = user_info
self._debug_last_backmsg_id: str | None = None
self._fragment_storage: FragmentStorage = MemoryFragmentStorage()
_LOGGER.debug("AppSession initialized (id=%s)", self.id)
def __del__(self) -> None:
"""Ensure that we call shutdown() when an AppSession is garbage collected."""
self.shutdown()
def register_file_watchers(self) -> None:
"""Register handlers to be called when various files are changed.
Files that we watch include:
- source files that already exist (for edits)
- `.py` files in the main script's `pages/` directory (for file additions
and deletions)
- project and user-level config.toml files
- the project-level secrets.toml files
This method is called automatically on AppSession construction, but it may be
called again in the case when a session is disconnected and is being reconnect
to.
"""
if self._local_sources_watcher is None:
self._local_sources_watcher = LocalSourcesWatcher(self._pages_manager)
self._local_sources_watcher.register_file_change_callback(
self._on_source_file_changed
)
self._stop_config_listener = config.on_config_parsed(
self._on_source_file_changed, force_connect=True
)
secrets_singleton.file_change_listener.connect(self._on_secrets_file_changed)
def disconnect_file_watchers(self) -> None:
"""Disconnect the file watcher handlers registered by register_file_watchers."""
if self._local_sources_watcher is not None:
self._local_sources_watcher.close()
if self._stop_config_listener is not None:
self._stop_config_listener()
if self._stop_pages_listener is not None:
self._stop_pages_listener()
secrets_singleton.file_change_listener.disconnect(self._on_secrets_file_changed)
self._local_sources_watcher = None
self._stop_config_listener = None
self._stop_pages_listener = None
def flush_browser_queue(self) -> list[ForwardMsg]:
"""Clear the forward message queue and return the messages it contained.
The Server calls this periodically to deliver new messages
to the browser connected to this app.
Returns
-------
list[ForwardMsg]
The messages that were removed from the queue and should
be delivered to the browser.
"""
return self._browser_queue.flush()
def shutdown(self) -> None:
"""Shut down the AppSession.
It's an error to use a AppSession after it's been shut down.
"""
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
_LOGGER.debug("Shutting down (id=%s)", self.id)
# Clear any unused session files in upload file manager and media
# file manager
self._uploaded_file_mgr.remove_session_files(self.id)
if runtime.exists():
rt = runtime.get_instance()
rt.media_file_mgr.clear_session_refs(self.id)
rt.media_file_mgr.remove_orphaned_files()
# Shut down the ScriptRunner, if one is active.
# self._state must not be set to SHUTDOWN_REQUESTED until
# *after* this is called.
self.request_script_stop()
self._state = AppSessionState.SHUTDOWN_REQUESTED
# Disconnect all file watchers if we haven't already, although we will have
# generally already done so by the time we get here.
self.disconnect_file_watchers()
def _enqueue_forward_msg(self, msg: ForwardMsg) -> None:
"""Enqueue a new ForwardMsg to our browser queue.
This can be called on both the main thread and a ScriptRunner
run thread.
Parameters
----------
msg : ForwardMsg
The message to enqueue
"""
if self._debug_last_backmsg_id:
msg.debug_last_backmsg_id = self._debug_last_backmsg_id
self._browser_queue.enqueue(msg)
if self._message_enqueued_callback:
self._message_enqueued_callback()
def handle_backmsg(self, msg: BackMsg) -> None:
"""Process a BackMsg."""
try:
msg_type = msg.WhichOneof("type")
if msg_type == "rerun_script":
if msg.debug_last_backmsg_id:
self._debug_last_backmsg_id = msg.debug_last_backmsg_id
self._handle_rerun_script_request(msg.rerun_script)
elif msg_type == "load_git_info":
self._handle_git_information_request()
elif msg_type == "clear_cache":
self._handle_clear_cache_request()
elif msg_type == "app_heartbeat":
self._handle_app_heartbeat_request()
elif msg_type == "set_run_on_save":
self._handle_set_run_on_save_request(msg.set_run_on_save)
elif msg_type == "stop_script":
self._handle_stop_script_request()
elif msg_type == "file_urls_request":
self._handle_file_urls_request(msg.file_urls_request)
elif msg_type == "deferred_file_request":
# Execute deferred callable in a separate thread to avoid blocking
# the main event loop. Use create_task to run the async handler.
# Store task reference to prevent garbage collection.
task = asyncio.create_task(
self._handle_deferred_file_request(msg.deferred_file_request)
)
# Add task name for better debugging
task.set_name(f"deferred_file_{msg.deferred_file_request.file_id}")
else:
_LOGGER.warning('No handler for "%s"', msg_type)
except Exception as ex:
_LOGGER.exception("Error processing back message")
self.handle_backmsg_exception(ex)
def handle_backmsg_exception(self, e: BaseException) -> None:
"""Handle an Exception raised while processing a BackMsg from the browser."""
# This does a few things:
# 1) Clears the current app in the browser.
# 2) Marks the current app as "stopped" in the browser.
# 3) HACK: Resets any script params that may have been broken (e.g. the
# command-line when rerunning with wrong argv[0])
self._on_scriptrunner_event(
self._scriptrunner, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
)
self._on_scriptrunner_event(
self._scriptrunner,
ScriptRunnerEvent.SCRIPT_STARTED,
page_script_hash="",
)
self._on_scriptrunner_event(
self._scriptrunner, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
)
# Send an Exception message to the frontend.
# Because _on_scriptrunner_event does its work in an eventloop callback,
# this exception ForwardMsg *must* also be enqueued in a callback,
# so that it will be enqueued *after* the various ForwardMsgs that
# _on_scriptrunner_event sends.
self._event_loop.call_soon_threadsafe(
lambda: self._enqueue_forward_msg(self._create_exception_message(e))
)
def request_rerun(self, client_state: ClientState | None) -> None:
"""Signal that we're interested in running the script.
If the script is not already running, it will be started immediately.
Otherwise, a rerun will be requested.
Parameters
----------
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ClientState protobuf to run the script with, or None
to use previous client state.
"""
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
_LOGGER.warning("Discarding rerun request after shutdown")
return
if client_state:
fragment_id = client_state.fragment_id
# Early check whether this fragment still exists in the fragment storage or
# might have been removed by a full app run. This is not merely a
# performance optimization, but also fixes following potential situation:
# A fragment run might create a new ScriptRunner when the current
# ScriptRunner is in state STOPPED (in this case, the 'success' variable
# below is false and the new ScriptRunner is created). This will lead to all
# events that were not sent / received from the previous script runner to be
# ignored in _handle_scriptrunner_event_on_event_loop, because the
# _script_runner changed. When the full app rerun ScriptRunner is done
# (STOPPED) but its events are not processed before the new ScriptRunner is
# created, its finished message is not sent to the frontend and no
# full-app-run cleanup is happening. This scenario can be triggered by the
# example app described in
# https://github.com/streamlit/streamlit/issues/9921, where the dialog
# sometimes stays open.
if fragment_id and not self._fragment_storage.contains(fragment_id):
_LOGGER.info(
"The fragment with id %s does not exist anymore - "
"it might have been removed during a preceding full-app rerun.",
fragment_id,
)
return
if client_state.HasField("context_info"):
self._client_state.context_info.CopyFrom(client_state.context_info)
rerun_data = RerunData(
query_string=client_state.query_string,
widget_states=client_state.widget_states,
page_script_hash=client_state.page_script_hash,
page_name=client_state.page_name,
fragment_id=fragment_id if fragment_id else None,
is_auto_rerun=client_state.is_auto_rerun,
cached_message_hashes=set(client_state.cached_message_hashes),
context_info=client_state.context_info,
)
else:
rerun_data = RerunData()
if self._scriptrunner is not None:
if (
bool(config.get_option("runner.fastReruns"))
and not rerun_data.fragment_id
):
# If fastReruns is enabled and this is *not* a rerun of a fragment,
# we don't send rerun requests to our existing ScriptRunner. Instead, we
# tell it to shut down. We'll then spin up a new ScriptRunner, below, to
# handle the rerun immediately.
self._scriptrunner.request_stop()
self._scriptrunner = None
else:
# Either fastReruns is not enabled or this RERUN request is a request to
# run a fragment. We send our current ScriptRunner a rerun request, and
# if it's accepted, we're done.
success = self._scriptrunner.request_rerun(rerun_data)
if success:
return
# If we are here, then either we have no ScriptRunner, or our
# current ScriptRunner is shutting down and cannot handle a rerun
# request - so we'll create and start a new ScriptRunner.
self._create_scriptrunner(rerun_data)
def request_script_stop(self) -> None:
"""Request that the scriptrunner stop execution.
Does nothing if no scriptrunner exists.
"""
if self._scriptrunner is not None:
self._scriptrunner.request_stop()
def clear_user_info(self) -> None:
"""Clear the user info for this session."""
self._user_info.clear()
def _create_scriptrunner(self, initial_rerun_data: RerunData) -> None:
"""Create and run a new ScriptRunner with the given RerunData."""
self._scriptrunner = ScriptRunner(
session_id=self.id,
main_script_path=self._script_data.main_script_path,
session_state=self._session_state,
uploaded_file_mgr=self._uploaded_file_mgr,
script_cache=self._script_cache,
initial_rerun_data=initial_rerun_data,
user_info=self._user_info,
fragment_storage=self._fragment_storage,
pages_manager=self._pages_manager,
)
self._scriptrunner.on_event.connect(self._on_scriptrunner_event)
self._scriptrunner.start()
@property
def session_state(self) -> SessionState:
return self._session_state
def _should_rerun_on_file_change(self, filepath: str) -> bool:
pages = self._pages_manager.get_pages()
changed_page_script_hash = next(
filter(lambda k: pages[k]["script_path"] == filepath, pages),
None,
)
if changed_page_script_hash is not None:
current_page_script_hash = self._client_state.page_script_hash
return changed_page_script_hash == current_page_script_hash
return True
def _on_source_file_changed(self, filepath: str | None = None) -> None:
"""One of our source files changed. Clear the cache and schedule a rerun if
appropriate.
"""
self._script_cache.clear()
if filepath is not None and not self._should_rerun_on_file_change(filepath):
return
if self._run_on_save:
self.request_rerun(self._client_state)
else:
self._enqueue_forward_msg(self._create_file_change_message())
def _on_secrets_file_changed(self, _: Any) -> None:
"""Called when `secrets.file_change_listener` emits a Signal."""
# NOTE: At the time of writing, this function only calls
# `_on_source_file_changed`. The reason behind creating this function instead of
# just passing `_on_source_file_changed` to `connect` / `disconnect` directly is
# that every function that is passed to `connect` / `disconnect` must have at
# least one argument for `sender` (in this case we don't really care about it,
# thus `_`), and introducing an unnecessary argument to
# `_on_source_file_changed` just for this purpose sounded finicky.
self._on_source_file_changed()
def _clear_queue(self, fragment_ids_this_run: list[str] | None = None) -> None:
self._browser_queue.clear(
retain_lifecycle_msgs=True, fragment_ids_this_run=fragment_ids_this_run
)
def _on_scriptrunner_event(
self,
sender: ScriptRunner | None,
event: ScriptRunnerEvent,
forward_msg: ForwardMsg | None = None,
exception: BaseException | None = None,
client_state: ClientState | None = None,
page_script_hash: str | None = None,
fragment_ids_this_run: list[str] | None = None,
pages: dict[PageHash, PageInfo] | None = None,
) -> None:
"""Called when our ScriptRunner emits an event.
This is generally called from the sender ScriptRunner's script thread.
We forward the event on to _handle_scriptrunner_event_on_event_loop,
which will be called on the main thread.
"""
self._event_loop.call_soon_threadsafe(
lambda: self._handle_scriptrunner_event_on_event_loop(
sender,
event,
forward_msg,
exception,
client_state,
page_script_hash,
fragment_ids_this_run,
pages,
)
)
def _handle_scriptrunner_event_on_event_loop(
self,
sender: ScriptRunner | None,
event: ScriptRunnerEvent,
forward_msg: ForwardMsg | None = None,
exception: BaseException | None = None,
client_state: ClientState | None = None,
page_script_hash: str | None = None,
fragment_ids_this_run: list[str] | None = None,
pages: dict[PageHash, PageInfo] | None = None,
) -> None:
"""Handle a ScriptRunner event.
This function must only be called on our eventloop thread.
Parameters
----------
sender : ScriptRunner | None
The ScriptRunner that emitted the event. (This may be set to
None when called from `handle_backmsg_exception`, if no
ScriptRunner was active when the backmsg exception was raised.)
event : ScriptRunnerEvent
The event type.
forward_msg : ForwardMsg | None
The ForwardMsg to send to the frontend. Set only for the
ENQUEUE_FORWARD_MSG event.
exception : BaseException | None
An exception thrown during compilation. Set only for the
SCRIPT_STOPPED_WITH_COMPILE_ERROR event.
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ScriptRunner's final ClientState. Set only for the
SHUTDOWN event.
page_script_hash : str | None
A hash of the script path corresponding to the page currently being
run. Set only for the SCRIPT_STARTED event.
fragment_ids_this_run : list[str] | None
The fragment IDs of the fragments being executed in this script run. Only
set for the SCRIPT_STARTED event. If this value is falsy, this script run
must be for the full script.
clear_forward_msg_queue : bool
If set (the default), clears the queue of forward messages to be sent to the
browser. Set only for the SCRIPT_STARTED event.
"""
if self._event_loop != asyncio.get_running_loop():
raise RuntimeError(
"This function must only be called on the eventloop thread the AppSession was created on. "
"This should never happen."
)
if sender is not self._scriptrunner:
# This event was sent by a non-current ScriptRunner; ignore it.
# This can happen after sppinng up a new ScriptRunner (to handle a
# rerun request, for example) while another ScriptRunner is still
# shutting down. The shutting-down ScriptRunner may still
# emit events.
_LOGGER.debug("Ignoring event from non-current ScriptRunner: %s", event)
return
prev_state = self._state
if event == ScriptRunnerEvent.SCRIPT_STARTED:
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
self._state = AppSessionState.APP_IS_RUNNING
if page_script_hash is None:
raise RuntimeError(
"page_script_hash must be set for the SCRIPT_STARTED event. This should never happen."
)
# Update the client state with the new page_script_hash if
# necessary. This handles an edge case where a script is never
# finishes (eg. by calling st.rerun()), but the page has changed
# via st.navigation()
if page_script_hash != self._client_state.page_script_hash:
self._client_state.page_script_hash = page_script_hash
self._clear_queue(fragment_ids_this_run)
msg = self._create_new_session_message(
page_script_hash, fragment_ids_this_run, pages
)
self._enqueue_forward_msg(msg)
elif event in {
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR,
ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS,
}:
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
self._state = AppSessionState.APP_NOT_RUNNING
if event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS:
status = ForwardMsg.FINISHED_SUCCESSFULLY
elif event == ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS:
status = ForwardMsg.FINISHED_FRAGMENT_RUN_SUCCESSFULLY
else:
status = ForwardMsg.FINISHED_WITH_COMPILE_ERROR
self._enqueue_forward_msg(self._create_script_finished_message(status))
self._debug_last_backmsg_id = None
if event in {
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS,
}:
# The script completed successfully: update our
# LocalSourcesWatcher to account for any source code changes
# that change which modules should be watched.
if self._local_sources_watcher:
self._local_sources_watcher.update_watched_modules()
self._local_sources_watcher.update_watched_pages()
else:
# The script didn't complete successfully: send the exception
# to the frontend.
if exception is None:
raise RuntimeError(
"exception must be set for the SCRIPT_STOPPED_WITH_COMPILE_ERROR event. "
"This should never happen."
)
msg = ForwardMsg()
exception_utils.marshall(
msg.session_event.script_compilation_exception, exception
)
self._enqueue_forward_msg(msg)
elif event == ScriptRunnerEvent.SCRIPT_STOPPED_FOR_RERUN:
self._state = AppSessionState.APP_NOT_RUNNING
self._enqueue_forward_msg(
self._create_script_finished_message(
ForwardMsg.FINISHED_EARLY_FOR_RERUN
)
)
if self._local_sources_watcher:
self._local_sources_watcher.update_watched_modules()
elif event == ScriptRunnerEvent.SHUTDOWN:
if client_state is None:
raise RuntimeError(
"client_state must be set for the SHUTDOWN event. This should never happen."
)
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
# Only clear media files if the script is done running AND the
# session is actually shutting down.
runtime.get_instance().media_file_mgr.clear_session_refs(self.id)
self._client_state = client_state
self._scriptrunner = None
elif event == ScriptRunnerEvent.ENQUEUE_FORWARD_MSG:
if forward_msg is None:
raise RuntimeError(
"null forward_msg in ENQUEUE_FORWARD_MSG event. This should never happen."
)
self._enqueue_forward_msg(forward_msg)
# Send a message if our run state changed
app_was_running = prev_state == AppSessionState.APP_IS_RUNNING
app_is_running = self._state == AppSessionState.APP_IS_RUNNING
if app_is_running != app_was_running:
self._enqueue_forward_msg(self._create_session_status_changed_message())
def _create_session_status_changed_message(self) -> ForwardMsg:
"""Create and return a session_status_changed ForwardMsg."""
msg = ForwardMsg()
msg.session_status_changed.run_on_save = self._run_on_save
msg.session_status_changed.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
return msg
def _create_file_change_message(self) -> ForwardMsg:
"""Create and return a 'script_changed_on_disk' ForwardMsg."""
msg = ForwardMsg()
msg.session_event.script_changed_on_disk = True
return msg
def _create_new_session_message(
self,
page_script_hash: str,
fragment_ids_this_run: list[str] | None = None,
pages: dict[PageHash, PageInfo] | None = None,
) -> ForwardMsg:
"""Create and return a new_session ForwardMsg."""
msg = ForwardMsg()
msg.new_session.script_run_id = _generate_scriptrun_id()
msg.new_session.name = self._script_data.name
msg.new_session.main_script_path = self._pages_manager.main_script_path
msg.new_session.main_script_hash = self._pages_manager.main_script_hash
msg.new_session.page_script_hash = page_script_hash
if fragment_ids_this_run:
msg.new_session.fragment_ids_this_run.extend(fragment_ids_this_run)
self._populate_app_pages(
msg.new_session, pages or self._pages_manager.get_pages()
)
_populate_config_msg(msg.new_session.config)
# Handles theme sections
# [theme] configs
_populate_theme_msg(msg.new_session.custom_theme)
# [theme.light] configs
_populate_theme_msg(
msg.new_session.custom_theme.light,
f"theme.{config.CustomThemeCategories.LIGHT.value}",
)
# [theme.dark] configs
_populate_theme_msg(
msg.new_session.custom_theme.dark,
f"theme.{config.CustomThemeCategories.DARK.value}",
)
# [theme.sidebar] configs
_populate_theme_msg(
msg.new_session.custom_theme.sidebar,
f"theme.{config.CustomThemeCategories.SIDEBAR.value}",
)
# [theme.light.sidebar] configs
_populate_theme_msg(
msg.new_session.custom_theme.light.sidebar,
f"theme.{config.CustomThemeCategories.LIGHT_SIDEBAR.value}",
)
# [theme.dark.sidebar] configs
_populate_theme_msg(
msg.new_session.custom_theme.dark.sidebar,
f"theme.{config.CustomThemeCategories.DARK_SIDEBAR.value}",
)
# Immutable session data. We send this every time a new session is
# started, to avoid having to track whether the client has already
# received it. It does not change from run to run; it's up to the
# to perform one-time initialization only once.
imsg = msg.new_session.initialize
_populate_user_info_msg(imsg.user_info)
imsg.environment_info.streamlit_version = STREAMLIT_VERSION_STRING
imsg.environment_info.python_version = ".".join(map(str, sys.version_info))
imsg.environment_info.server_os = env_util.SYSTEM
imsg.environment_info.has_display = (
"DISPLAY" in os.environ or "WAYLAND_DISPLAY" in os.environ
)
imsg.session_status.run_on_save = self._run_on_save
imsg.session_status.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
imsg.is_hello = self._script_data.is_hello
imsg.session_id = self.id
return msg
def _create_script_finished_message(
self, status: ForwardMsg.ScriptFinishedStatus.ValueType
) -> ForwardMsg:
"""Create and return a script_finished ForwardMsg."""
msg = ForwardMsg()
msg.script_finished = status
return msg
def _create_exception_message(self, e: BaseException) -> ForwardMsg:
"""Create and return an Exception ForwardMsg."""
msg = ForwardMsg()
exception_utils.marshall(msg.delta.new_element.exception, e)
return msg
def _handle_git_information_request(self) -> None:
msg = ForwardMsg()
try:
from streamlit.git_util import GitRepo
repo = GitRepo(self._script_data.main_script_path)
repo_info = repo.get_repo_info()
if repo_info is None:
return
repository_name, branch, module = repo_info
repository_name = repository_name.removesuffix(".git")
msg.git_info_changed.repository = repository_name
msg.git_info_changed.branch = branch
msg.git_info_changed.module = module
msg.git_info_changed.untracked_files[:] = repo.untracked_files or []
msg.git_info_changed.uncommitted_files[:] = repo.uncommitted_files or []
if repo.is_head_detached:
msg.git_info_changed.state = GitInfo.GitStates.HEAD_DETACHED
elif repo.ahead_commits and len(repo.ahead_commits) > 0:
msg.git_info_changed.state = GitInfo.GitStates.AHEAD_OF_REMOTE
else:
msg.git_info_changed.state = GitInfo.GitStates.DEFAULT
_LOGGER.debug(
"Git information found. Name: %s, Branch: %s, Module: %s",
repository_name,
branch,
module,
)
self._enqueue_forward_msg(msg)
except Exception as ex:
# Users may never even install Git in the first place, so this
# error requires no action. It can be useful for debugging.
_LOGGER.debug("Obtaining Git information produced an error", exc_info=ex)
def _handle_rerun_script_request(
self, client_state: ClientState | None = None
) -> None:
"""Tell the ScriptRunner to re-run its script.
Parameters
----------
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ClientState protobuf to run the script with, or None
to use previous client state.
"""
self.request_rerun(client_state)
def _handle_stop_script_request(self) -> None:
"""Tell the ScriptRunner to stop running its script."""
self.request_script_stop()
def _handle_clear_cache_request(self) -> None:
"""Clear this app's cache.
Because this cache is global, it will be cleared for all users.
"""
caching.cache_data.clear()
caching.cache_resource.clear()
self._session_state.clear()
def _handle_app_heartbeat_request(self) -> None:
"""Handle an incoming app heartbeat.
The heartbeat indicates the frontend is active and keeps the
websocket from going idle and disconnecting.
The actual handler here is a noop
"""
pass
def _handle_set_run_on_save_request(self, new_value: bool) -> None:
"""Change our run_on_save flag to the given value.
The browser will be notified of the change.
Parameters
----------
new_value : bool
New run_on_save value
"""
self._run_on_save = new_value
self._enqueue_forward_msg(self._create_session_status_changed_message())
def _handle_file_urls_request(self, file_urls_request: FileURLsRequest) -> None:
"""Handle a file_urls_request BackMsg sent by the client."""
msg = ForwardMsg()
msg.file_urls_response.response_id = file_urls_request.request_id
upload_url_infos = self._uploaded_file_mgr.get_upload_urls(
self.id, file_urls_request.file_names
)
for upload_url_info in upload_url_infos:
msg.file_urls_response.file_urls.append(
FileURLs(
file_id=upload_url_info.file_id,
upload_url=upload_url_info.upload_url,
delete_url=upload_url_info.delete_url,
)
)
self._enqueue_forward_msg(msg)
async def _handle_deferred_file_request(self, request: DeferredFileRequest) -> None:
"""Handle a deferred_file_request BackMsg sent by the client.
Execute the deferred callable in a separate thread and send the URL back
to the frontend. This prevents blocking the main event loop if the callable
is slow.
"""
response = ForwardMsg()
response.deferred_file_response.file_id = request.file_id
try:
# Execute the deferred callable in a separate thread to avoid blocking
# the main event loop. This is critical for shared apps where a slow
# callable could freeze all sessions.
url = await asyncio.to_thread(
runtime.get_instance().media_file_mgr.execute_deferred,
request.file_id,
)
response.deferred_file_response.url = url
except Exception as e:
# Send error response if callable execution fails
_LOGGER.exception(
"Error executing deferred callable for file_id %s", request.file_id
)
response.deferred_file_response.error_msg = str(e)
self._enqueue_forward_msg(response)
def _populate_app_pages(
self, msg: NewSession, pages: dict[PageHash, PageInfo]
) -> None:
for page_script_hash, page_info in pages.items():
page_proto = msg.app_pages.add()
page_proto.page_script_hash = page_script_hash
page_proto.page_name = page_info["page_name"].replace("_", " ")
page_proto.url_pathname = page_info["page_name"]
page_proto.icon = page_info["icon"]
# Config.ToolbarMode.ValueType does not exist at runtime (only in the pyi stubs), so
# we need to use quotes.
# This field will be available at runtime as of protobuf 3.20.1, but
# we are using an older version.
# For details, see: https://github.com/protocolbuffers/protobuf/issues/8175
def _get_toolbar_mode() -> Config.ToolbarMode.ValueType:
config_key = "client.toolbarMode"
config_value = config.get_option(config_key)
enum_value: Config.ToolbarMode.ValueType | None = getattr(
Config.ToolbarMode, config_value.upper()
)
if enum_value is None:
allowed_values = ", ".join(k.lower() for k in Config.ToolbarMode.keys()) # noqa: SIM118
raise ValueError(
f"Config {config_key!r} expects to have one of "
f"the following values: {allowed_values}. "
f"Current value: {config_value}"
)
return enum_value
def _populate_config_msg(msg: Config) -> None:
msg.gather_usage_stats = config.get_option("browser.gatherUsageStats")
msg.max_cached_message_age = config.get_option("global.maxCachedMessageAge")
msg.allow_run_on_save = config.get_option("server.allowRunOnSave")
msg.hide_top_bar = config.get_option("ui.hideTopBar")
if config.get_option("client.showSidebarNavigation") is False:
msg.hide_sidebar_nav = True
msg.toolbar_mode = _get_toolbar_mode()
def _populate_theme_msg(msg: CustomThemeConfig, section: str = "theme") -> None:
theme_opts = config.get_options_for_section(section)
if all(val is None for val in theme_opts.values()):
return
for option_name, option_val in theme_opts.items():
# We need to ignore some config options here that need special handling
# and cannot directly be set on the protobuf.
if (
option_name
not in {
"base",
"font",
"fontFaces",
"codeFont",
"headingFont",
"headingFontSizes",
"headingFontWeights",
"chartCategoricalColors",
"chartSequentialColors",
}
and option_val is not None
):
setattr(msg, to_snake_case(option_name), option_val)
# NOTE: If unset, base and font will default to the protobuf enum zero
# values, which are BaseTheme.LIGHT and FontFamily.SANS_SERIF,
# respectively. This is why we both don't handle the cases explicitly and
# also only log a warning when receiving invalid base/font options.
base_map = {
"light": msg.BaseTheme.LIGHT,
"dark": msg.BaseTheme.DARK,
}
base = theme_opts.get("base", None)
if base is not None:
if base not in base_map:
_LOGGER.warning(
'"%s" is an invalid value for theme.base. Allowed values include %s. '
'Setting theme.base to "light".',
base,
list(base_map.keys()),
)
else:
msg.base = base_map[base]
# Handle font, codeFont, and headingFont config options and if they are
# specified with a source URL
msg = parse_fonts_with_source(
msg,
theme_opts.get("font", None),
theme_opts.get("codeFont", None),
theme_opts.get("headingFont", None),
section,
)
font_faces = theme_opts.get("fontFaces", None)
# If fontFaces was configured via config.toml, it's already a parsed list of
# dictionaries. However, if it was provided via env variable or via CLI arg,
# it's a json string that still needs to be parsed.
if isinstance(font_faces, str):
try:
font_faces = json.loads(font_faces)
except Exception as e:
_LOGGER.warning(
"Failed to parse the theme.fontFaces config option with json.loads: %s.",
font_faces,
exc_info=e,
)
font_faces = None
if font_faces is not None:
for font_face in font_faces:
try:
if "weight" in font_face:
font_face["weight_range"] = str(font_face["weight"])
del font_face["weight"]
msg.font_faces.append(ParseDict(font_face, FontFace()))
except Exception as e: # noqa: PERF203
_LOGGER.warning(
"Failed to parse the theme.fontFaces config option: %s.",
font_face,
exc_info=e,
)
heading_font_sizes = theme_opts.get("headingFontSizes", None)
# headingFontSizes is either an single string value (set for all headings) or
# a list of strings (set specific headings). However, if it was provided via env variable or via CLI arg,
# it's a json string that needs to be parsed.
if isinstance(heading_font_sizes, str):
heading_font_sizes = heading_font_sizes.strip().lower()
if heading_font_sizes.endswith(("px", "rem")):
# Handle the case where headingFontSizes is a single string value to be applied to all headings
heading_font_sizes = [heading_font_sizes] * 6
else:
# Handle the case where headingFontSizes is a json string (coming from CLI or env variable)
try:
heading_font_sizes = json.loads(heading_font_sizes)
except Exception as e:
_LOGGER.warning(
"Failed to parse the theme.headingFontSizes config option with json.loads: %s.",
heading_font_sizes,
exc_info=e,
)
heading_font_sizes = None
if heading_font_sizes is not None:
# Check that the list has between 1 and 6 values
if not heading_font_sizes or len(heading_font_sizes) > 6:
raise ValueError(
f"Config theme.headingFontSizes should have 1-6 values corresponding to h1-h6, "
f"but got {len(heading_font_sizes)}"
)
for size in heading_font_sizes:
try:
msg.heading_font_sizes.append(size)
except Exception as e: # noqa: PERF203
_LOGGER.warning(
"Failed to parse the theme.headingFontSizes config option: %s.",
size,
exc_info=e,
)
heading_font_weights = theme_opts.get("headingFontWeights", None)
# headingFontWeights is either an integer (set for all headings) or
# a list of integers (set specific headings). However, if it was provided via env variable or via CLI arg,
# it's a json string that needs to be parsed.
if isinstance(heading_font_weights, str):
try:
heading_font_weights = json.loads(heading_font_weights)
except Exception as e:
_LOGGER.warning(
"Failed to parse the theme.headingFontWeights config option with json.loads: %s.",
heading_font_weights,
exc_info=e,
)
heading_font_weights = None
if isinstance(heading_font_weights, int):
# Set all heading font weights to the same value
for _ in range(1, 7):
msg.heading_font_weights.append(heading_font_weights)
elif isinstance(heading_font_weights, list):
# Check that the list has between 1 and 6 values
if not heading_font_weights or len(heading_font_weights) > 6:
raise ValueError(
f"Config theme.headingFontWeights should have 1-6 values corresponding to h1-h6, "
f"but got {len(heading_font_weights)}"
)
# Ensure we have exactly 6 heading font weights (h1-h6), padding with 600 as default
heading_weights = heading_font_weights[:6] + [600] * (
6 - len(heading_font_weights)
)
for weight in heading_weights:
try:
msg.heading_font_weights.append(weight)
except Exception as e: # noqa: PERF203
_LOGGER.warning(
"Failed to parse the theme.headingFontWeights config option: %s.",
weight,
exc_info=e,
)
chart_categorical_colors = theme_opts.get("chartCategoricalColors", None)
# If chartCategoricalColors was configured via config.toml, it's already a list of
# strings. However, if it was provided via env variable or via CLI arg,
# it's a json string that needs to be parsed.
if isinstance(chart_categorical_colors, str):
try:
chart_categorical_colors = json.loads(chart_categorical_colors)
except json.JSONDecodeError as e:
_LOGGER.warning(
"Failed to parse the theme.chartCategoricalColors config option: %s.",
chart_categorical_colors,
exc_info=e,
)
chart_categorical_colors = None
if chart_categorical_colors is not None:
for color in chart_categorical_colors:
try:
msg.chart_categorical_colors.append(color)
except Exception as e: # noqa: PERF203
_LOGGER.warning(
"Failed to parse the theme.chartCategoricalColors config option: %s.",
color,
exc_info=e,
)
chart_sequential_colors = theme_opts.get("chartSequentialColors", None)
# If chartSequentialColors was configured via config.toml, it's already a list of
# strings. However, if it was provided via env variable or via CLI arg,
# it's a json string that needs to be parsed.
if isinstance(chart_sequential_colors, str):
try:
chart_sequential_colors = json.loads(chart_sequential_colors)
except json.JSONDecodeError as e:
_LOGGER.warning(
"Failed to parse the theme.chartSequentialColors config option: %s.",
chart_sequential_colors,
exc_info=e,
)
chart_sequential_colors = None
if chart_sequential_colors is not None:
# Check that the list has 10 color values
if len(chart_sequential_colors) != 10:
_LOGGER.error(
"Config theme.chartSequentialColors should have 10 color values, "
"but got %s. Defaulting to Streamlit's default colors.",
len(chart_sequential_colors),
)
for color in chart_sequential_colors:
try:
msg.chart_sequential_colors.append(color)
except Exception as e: # noqa: PERF203
_LOGGER.warning(
"Failed to parse the theme.chartSequentialColors config option: %s.",
color,
exc_info=e,
)
def _populate_user_info_msg(msg: UserInfo) -> None:
inst = Installation.instance()
msg.installation_id = inst.installation_id
msg.installation_id_v3 = inst.installation_id_v3
msg.installation_id_v4 = inst.installation_id_v4
| AppSession |
python | takluyver__flit | flit/wheel.py | {
"start": 225,
"end": 279
} | class ____(core_wheel.WheelBuilder):
pass
| WheelBuilder |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_bool.py | {
"start": 575,
"end": 645
} | class ____:
def __bool__(self):
x = True
return x | Bool2 |
python | sympy__sympy | sympy/printing/llvmjitcode.py | {
"start": 1114,
"end": 4293
} | class ____(Printer):
'''Convert expressions to LLVM IR'''
def __init__(self, module, builder, fn, *args, **kwargs):
self.func_arg_map = kwargs.pop("func_arg_map", {})
if not llvmlite:
raise ImportError("llvmlite is required for LLVMJITPrinter")
super().__init__(*args, **kwargs)
self.fp_type = ll.DoubleType()
self.module = module
self.builder = builder
self.fn = fn
self.ext_fn = {} # keep track of wrappers to external functions
self.tmp_var = {}
def _add_tmp_var(self, name, value):
self.tmp_var[name] = value
def _print_Number(self, n):
return ll.Constant(self.fp_type, float(n))
def _print_Integer(self, expr):
return ll.Constant(self.fp_type, float(expr.p))
def _print_Symbol(self, s):
val = self.tmp_var.get(s)
if not val:
# look up parameter with name s
val = self.func_arg_map.get(s)
if not val:
raise LookupError("Symbol not found: %s" % s)
return val
def _print_Pow(self, expr):
base0 = self._print(expr.base)
if expr.exp == S.NegativeOne:
return self.builder.fdiv(ll.Constant(self.fp_type, 1.0), base0)
if expr.exp == S.Half:
fn = self.ext_fn.get("sqrt")
if not fn:
fn_type = ll.FunctionType(self.fp_type, [self.fp_type])
fn = ll.Function(self.module, fn_type, "sqrt")
self.ext_fn["sqrt"] = fn
return self.builder.call(fn, [base0], "sqrt")
if expr.exp == 2:
return self.builder.fmul(base0, base0)
exp0 = self._print(expr.exp)
fn = self.ext_fn.get("pow")
if not fn:
fn_type = ll.FunctionType(self.fp_type, [self.fp_type, self.fp_type])
fn = ll.Function(self.module, fn_type, "pow")
self.ext_fn["pow"] = fn
return self.builder.call(fn, [base0, exp0], "pow")
def _print_Mul(self, expr):
nodes = [self._print(a) for a in expr.args]
e = nodes[0]
for node in nodes[1:]:
e = self.builder.fmul(e, node)
return e
def _print_Add(self, expr):
nodes = [self._print(a) for a in expr.args]
e = nodes[0]
for node in nodes[1:]:
e = self.builder.fadd(e, node)
return e
# TODO - assumes all called functions take one double precision argument.
# Should have a list of math library functions to validate this.
def _print_Function(self, expr):
name = expr.func.__name__
e0 = self._print(expr.args[0])
fn = self.ext_fn.get(name)
if not fn:
fn_type = ll.FunctionType(self.fp_type, [self.fp_type])
fn = ll.Function(self.module, fn_type, name)
self.ext_fn[name] = fn
return self.builder.call(fn, [e0], name)
def emptyPrinter(self, expr):
raise TypeError("Unsupported type for LLVM JIT conversion: %s"
% type(expr))
# Used when parameters are passed by array. Often used in callbacks to
# handle a variable number of parameters.
| LLVMJitPrinter |
python | hynek__structlog | tests/test_config.py | {
"start": 4561,
"end": 10963
} | class ____:
def test_repr(self):
"""
repr reflects all attributes.
"""
p = BoundLoggerLazyProxy(
None,
processors=[1, 2, 3],
context_class=dict,
initial_values={"foo": 42},
logger_factory_args=(4, 5),
)
assert (
"<BoundLoggerLazyProxy(logger=None, wrapper_class=None, "
"processors=[1, 2, 3], "
"context_class=<class 'dict'>, "
"initial_values={'foo': 42}, "
"logger_factory_args=(4, 5))>"
) == repr(p)
def test_returns_bound_logger_on_bind(self, proxy):
"""
bind gets proxied to the wrapped bound logger.
"""
assert isinstance(proxy.bind(), BoundLoggerBase)
def test_returns_bound_logger_on_new(self, proxy):
"""
new gets proxied to the wrapped bound logger.
"""
assert isinstance(proxy.new(), BoundLoggerBase)
def test_returns_bound_logger_on_try_unbind(self, proxy):
"""
try_unbind gets proxied to the wrapped bound logger.
"""
assert isinstance(proxy.try_unbind(), BoundLoggerBase)
def test_prefers_args_over_config(self):
"""
Configuration can be overridden by passing arguments.
"""
p = BoundLoggerLazyProxy(
None, processors=[1, 2, 3], context_class=dict
)
b = p.bind()
assert isinstance(b._context, dict)
assert [1, 2, 3] == b._processors
class Class:
def __init__(self, *args, **kw):
pass
def update(self, *args, **kw):
pass
configure(processors=[4, 5, 6], context_class=Class)
b = p.bind()
assert not isinstance(b._context, Class)
assert [1, 2, 3] == b._processors
def test_falls_back_to_config(self, proxy):
"""
Configuration is used if no arguments are passed.
"""
b = proxy.bind()
assert isinstance(b._context, _CONFIG.default_context_class)
assert _CONFIG.default_processors == b._processors
def test_bind_honors_initial_values(self):
"""
Passed initial_values are merged on binds.
"""
p = BoundLoggerLazyProxy(None, initial_values={"a": 1, "b": 2})
b = p.bind()
assert {"a": 1, "b": 2} == b._context
b = p.bind(c=3)
assert {"a": 1, "b": 2, "c": 3} == b._context
def test_bind_binds_new_values(self, proxy):
"""
Values passed to bind arrive in the context.
"""
b = proxy.bind(c=3)
assert {"c": 3} == b._context
def test_unbind_unbinds_from_initial_values(self):
"""
It's possible to unbind a value that came from initial_values.
"""
p = BoundLoggerLazyProxy(None, initial_values={"a": 1, "b": 2})
b = p.unbind("a")
assert {"b": 2} == b._context
def test_honors_wrapper_class(self):
"""
Passed wrapper_class is used.
"""
p = BoundLoggerLazyProxy(None, wrapper_class=Wrapper)
b = p.bind()
assert isinstance(b, Wrapper)
def test_honors_wrapper_from_config(self, proxy):
"""
Configured wrapper_class is used if not overridden.
"""
configure(wrapper_class=Wrapper)
b = proxy.bind()
assert isinstance(b, Wrapper)
def test_new_binds_only_initial_values_implicit_ctx_class(self, proxy):
"""
new() doesn't clear initial_values if context_class comes from config.
"""
proxy = BoundLoggerLazyProxy(None, initial_values={"a": 1, "b": 2})
b = proxy.new(foo=42)
assert {"a": 1, "b": 2, "foo": 42} == b._context
def test_new_binds_only_initial_values_explicit_ctx_class(self, proxy):
"""
new() doesn't clear initial_values if context_class is passed
explicitly..
"""
proxy = BoundLoggerLazyProxy(
None, initial_values={"a": 1, "b": 2}, context_class=dict
)
b = proxy.new(foo=42)
assert {"a": 1, "b": 2, "foo": 42} == b._context
def test_rebinds_bind_method(self, proxy):
"""
To save time, be rebind the bind method once the logger has been
cached.
"""
configure(cache_logger_on_first_use=True)
bind = proxy.bind
proxy.bind()
assert bind != proxy.bind
def test_does_not_cache_by_default(self, proxy):
"""
Proxy's bind method doesn't change by default.
"""
bind = proxy.bind
proxy.bind()
assert bind == proxy.bind
@pytest.mark.parametrize("cache", [True, False])
def test_argument_takes_precedence_over_configuration(self, cache):
"""
Passing cache_logger_on_first_use as an argument overrides config.
"""
configure(cache_logger_on_first_use=cache)
proxy = BoundLoggerLazyProxy(None, cache_logger_on_first_use=not cache)
bind = proxy.bind
proxy.bind()
if cache:
assert bind == proxy.bind
else:
assert bind != proxy.bind
def test_bind_doesnt_cache_logger(self):
"""
Calling configure() changes BoundLoggerLazyProxys immediately.
Previous uses of the BoundLoggerLazyProxy don't interfere.
"""
class F:
"New logger factory with a new attribute"
def info(self, *args):
return 5
proxy = BoundLoggerLazyProxy(None)
proxy.bind()
configure(logger_factory=F)
new_b = proxy.bind()
assert new_b.info("test") == 5
def test_emphemeral(self):
"""
Calling an unknown method proxy creates a new wrapped bound logger
first.
"""
class Foo(BoundLoggerBase):
def foo(self):
return 42
proxy = BoundLoggerLazyProxy(
None, wrapper_class=Foo, cache_logger_on_first_use=False
)
assert 42 == proxy.foo()
@pytest.mark.parametrize("proto", range(pickle.HIGHEST_PROTOCOL + 1))
def test_pickle(self, proto):
"""
Can be pickled and unpickled.
"""
bllp = BoundLoggerLazyProxy(None)
assert repr(bllp) == repr(pickle.loads(pickle.dumps(bllp, proto)))
| TestBoundLoggerLazyProxy |
python | streamlit__streamlit | lib/streamlit/runtime/scriptrunner_utils/script_requests.py | {
"start": 1038,
"end": 1500
} | class ____(Enum):
# The ScriptRunner should continue running its script.
CONTINUE = "CONTINUE"
# If the script is running, it should be stopped as soon
# as the ScriptRunner reaches an interrupt point.
# This is a terminal state.
STOP = "STOP"
# A script rerun has been requested. The ScriptRunner should
# handle this request as soon as it reaches an interrupt point.
RERUN = "RERUN"
@dataclass(frozen=True)
| ScriptRequestType |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 14706,
"end": 16068
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = HubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
config=config,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = HubertFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| HubertEncoderLayer |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/locators.py | {
"start": 20110,
"end": 22432
} | class ____(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile(
"""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path), params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
| Page |
python | realpython__materials | python-class/shapes.py | {
"start": 14,
"end": 398
} | class ____:
def __set_name__(self, owner, name):
self._name = name
def __get__(self, instance, owner):
return instance.__dict__[self._name]
def __set__(self, instance, value):
if (not isinstance(value, int | float)) or value <= 0:
raise ValueError("positive number expected")
instance.__dict__[self._name] = value
| PositiveNumber |
python | spyder-ide__spyder | spyder/utils/clipboard_helper.py | {
"start": 229,
"end": 1687
} | class ____:
# Clipboard metadata
metadata_hash = None
metadata_indent = None
metadata_tab_stop_width_spaces = None
def get_current_hash(self):
clipboard = QApplication.clipboard()
return hash(str(clipboard.text()))
def get_line_indentation(self, text, tab_stop_width_spaces=None):
"""Get indentation for given line."""
if tab_stop_width_spaces:
text = text.replace("\t", " " * tab_stop_width_spaces)
return len(text)-len(text.lstrip())
def save_indentation(self, preceding_text, tab_stop_width_spaces=None):
"""
Save the indentation corresponding to the clipboard data.
Must be called right after copying.
"""
self.metadata_hash = self.get_current_hash()
self.metadata_indent = self.get_line_indentation(
preceding_text, tab_stop_width_spaces)
self.metadata_tab_stop_width_spaces = tab_stop_width_spaces
def remaining_lines_adjustment(self, preceding_text):
"""
Get remaining lines adjustments needed to keep multiline
pasted text consistant.
"""
if self.get_current_hash() == self.metadata_hash:
return (
self.get_line_indentation(
preceding_text,
self.metadata_tab_stop_width_spaces)
- self.metadata_indent)
return 0
CLIPBOARD_HELPER = ClipboardHelper()
| ClipboardHelper |
python | realpython__materials | celery-async-tasks/source_code_final/feedback/views.py | {
"start": 380,
"end": 457
} | class ____(TemplateView):
template_name = "feedback/success.html"
| SuccessView |
python | lepture__mistune | tests/test_misc.py | {
"start": 47,
"end": 5023
} | class ____(TestCase):
def test_none(self):
self.assertEqual(mistune.html(None), "")
def test_before_parse_hooks(self):
def _add_name(md, state):
state.env["name"] = "test"
md = mistune.create_markdown()
md.before_parse_hooks.append(_add_name)
state = md.block.state_cls()
md.parse("", state)
self.assertEqual(state.env["name"], "test")
def test_hard_wrap(self):
md = mistune.create_markdown(escape=False, hard_wrap=True)
result = md("foo\nbar")
expected = "<p>foo<br />\nbar</p>"
self.assertEqual(result.strip(), expected)
md = mistune.create_markdown(escape=False, hard_wrap=True, plugins=["speedup"])
result = md("foo\nbar")
self.assertEqual(result.strip(), expected)
def test_escape_html(self):
md = mistune.create_markdown(escape=True)
result = md("<div>1</div>")
expected = "<p><div>1</div></p>"
self.assertEqual(result.strip(), expected)
result = md("<em>1</em>")
expected = "<p><em>1</em></p>"
self.assertEqual(result.strip(), expected)
def test_harmful_links(self):
result = mistune.html("[h](javAscript:alert)")
expected = '<p><a href="#harmful-link">h</a></p>'
self.assertEqual(result.strip(), expected)
def test_ref_link(self):
result = mistune.html("[link][h]\n\n[h]: /foo")
expected = '<p><a href="/foo">link</a></p>'
self.assertEqual(result.strip(), expected)
def test_allow_harmful_protocols(self):
renderer = mistune.HTMLRenderer(allow_harmful_protocols=True)
md = mistune.Markdown(renderer)
result = md("[h](javascript:alert)")
expected = '<p><a href="javascript:alert">h</a></p>'
self.assertEqual(result.strip(), expected)
def test_allow_data_protocols(self):
renderer = mistune.HTMLRenderer(allow_harmful_protocols=["data:"])
md = mistune.Markdown(renderer)
result = md("[h](data:alert)")
expected = '<p><a href="data:alert">h</a></p>'
self.assertEqual(result.strip(), expected)
def test_use_plugin(self):
from mistune.plugins.url import url
md = mistune.Markdown(mistune.HTMLRenderer())
md.use(url)
def test_markdown_func(self):
result = mistune.markdown("**b**")
expected = "<p><strong>b</strong></p>\n"
self.assertEqual(result, expected)
# trigger to use cached parser
result = mistune.markdown("**b**")
self.assertEqual(result, expected)
def test_ast_output(self):
md = mistune.create_markdown(escape=False, renderer=None)
text = '# h1\n\nfoo **bar**\n\n`&<>"`'
result = md(text)
expected = [
{
"type": "heading",
"children": [{"type": "text", "raw": "h1"}],
"attrs": {"level": 1},
"style": "atx",
},
{"type": "blank_line"},
{
"type": "paragraph",
"children": [
{"type": "text", "raw": "foo "},
{"type": "strong", "children": [{"type": "text", "raw": "bar"}]},
],
},
{"type": "blank_line"},
{
"type": "paragraph",
"children": [
{"type": "codespan", "raw": '&<>"'},
],
},
]
self.assertEqual(result, expected)
def test_ast_url(self):
md = mistune.create_markdown(escape=False, renderer=None)
label = 'hi &<>"'
url = "https://example.com/foo?a=1&b=2"
text = "[{}]({})".format(label, url)
result = md(text)
expected = [
{
"type": "paragraph",
"children": [
{
"type": "link",
"children": [{"type": "text", "raw": label}],
"attrs": {"url": url},
},
],
},
]
self.assertEqual(result, expected)
def test_emsp(self):
md = mistune.create_markdown(escape=False, hard_wrap=True)
result = md("\u2003\u2003foo\nbar\n\n\u2003\u2003foobar")
expected = "<p>\u2003\u2003foo<br />\nbar</p>\n<p>\u2003\u2003foobar</p>"
self.assertEqual(result.strip(), expected)
def test_unicode_whitespace(self):
text = "# \u3000\u3000abc"
result = mistune.html(text)
expected = "<h1>\u3000\u3000abc</h1>\n"
self.assertEqual(result, expected)
def test_html_tag_text_following_list(self):
md = mistune.create_markdown(escape=False, hard_wrap=True)
result = md("foo\n- bar\n\ntable")
expected = "<p>foo</p>\n<ul>\n<li>bar</li>\n</ul>\n<p>table</p>"
self.assertEqual(result.strip(), expected)
| TestMiscCases |
python | getsentry__sentry | src/sentry/analytics/events/team_created.py | {
"start": 69,
"end": 267
} | class ____(analytics.Event):
user_id: int | None = None
default_user_id: int | str | None = None
organization_id: int
team_id: int
analytics.register(TeamCreatedEvent)
| TeamCreatedEvent |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_outline03.py | {
"start": 315,
"end": 1952
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("outline03.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""
Test the creation of a outlines in a XlsxWriter file. These tests are
based on the outline programs in the examples directory.
"""
workbook = Workbook(self.got_filename)
worksheet3 = workbook.add_worksheet("Outline Columns")
bold = workbook.add_format({"bold": 1})
data = [
["Month", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Total"],
["North", 50, 20, 15, 25, 65, 80],
["South", 10, 20, 30, 50, 50, 50],
["East", 45, 75, 50, 15, 75, 100],
["West", 15, 15, 55, 35, 20, 50],
]
worksheet3.set_row(0, None, bold)
worksheet3.set_column("A:A", 10, bold)
worksheet3.set_column("B:G", 6, None, {"level": 1})
worksheet3.set_column("H:H", 10)
for row, data_row in enumerate(data):
worksheet3.write_row(row, 0, data_row)
worksheet3.write("H2", "=SUM(B2:G2)", None, 255)
worksheet3.write("H3", "=SUM(B3:G3)", None, 210)
worksheet3.write("H4", "=SUM(B4:G4)", None, 360)
worksheet3.write("H5", "=SUM(B5:G5)", None, 190)
worksheet3.write("H6", "=SUM(H2:H5)", bold, 1015)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/map_test.py | {
"start": 6141,
"end": 58807
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
def _map_dataset_factory(self, components, apply_map, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(_test_combinations())
def testMapDataset(self, apply_map):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(_test_combinations_with_mode("graph"))
def testMapDatasetMultiThreaded(self, apply_map):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _parallel_map_dataset_factory(self, components, apply_map, count,
num_parallel_calls, buffer_size,
use_unbounded_threadpool=False):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn, num_parallel_calls=num_parallel_calls,
use_unbounded_threadpool=use_unbounded_threadpool)
dataset = dataset.prefetch(buffer_size).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_parallel_calls=1, buffer_size=1) +
combinations.combine(num_parallel_calls=1, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=4) +
combinations.combine(num_parallel_calls=8, buffer_size=8) +
combinations.combine(num_parallel_calls=8, buffer_size=16),
combinations.combine(use_unbounded_threadpool=[None, True, False])))
def testParallelMapDataset(self, apply_map, num_parallel_calls, buffer_size,
use_unbounded_threadpool):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._parallel_map_dataset_factory(components, apply_map, 14,
num_parallel_calls, buffer_size,
use_unbounded_threadpool))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=1, buffer_size=1) +
combinations.combine(num_parallel_calls=1, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=4) +
combinations.combine(num_parallel_calls=8, buffer_size=8) +
combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDatasetMultiThreaded(self, apply_map, num_parallel_calls,
buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._parallel_map_dataset_factory(components, apply_map, 18,
num_parallel_calls, buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
@combinations.generate(_test_combinations())
def testImplicitDisposeParallelMapDataset(self, apply_map):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._parallel_map_dataset_factory(components, apply_map, 1000,
100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapUnspecifiedOutputSize(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset,
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapError(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset,
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testPrefetchError(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset, lambda x: array_ops.check_numerics(x, "message")).prefetch(2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureIterator(self, apply_map):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return apply_map(dataset_ops.Dataset.range(10), _map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureHashTable(self, apply_map):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = apply_map(input_sentences,
lambda x: string_ops.string_split([x]).values)
dataset = apply_map(dataset, table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations_with_mode("graph"))
def testCaptureQueue(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
# When the map function in `MapDataset` raises an OutOfRange error, TF1 and
# TF2 behave differently. TF1 raises an OutOfRangeError to signal the end of
# sequence while TF2 raises an InvalidArgumentError. This behavior is
# controlled by the `preserve_cardinality` argument of `map` transformation
# which is set to `True` for TF2 and `False` for TF1, which is for backward
# compatibility.
if tf2.enabled():
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
else:
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureSameResourceMultipleTimes(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testSeededStatefulOperatorIsProperlyStateful(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
@combinations.generate(_test_combinations())
def testStatefulMapKeepsStateAcrossIterators(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
@combinations.generate(_test_combinations())
def testStatefulOperationInShortCircuit(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, increment_fn)
options = options_lib.Options()
options.experimental_optimization.inject_prefetch = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations())
def testMapDict(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: {"foo": x * 2, "bar": x**2})
dataset = apply_map(dataset, lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
@combinations.generate(_test_combinations())
def testMapDataclass(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: MyDataclass(value1=x, value2=2 * x))
dataset = apply_map(dataset, lambda x: x.value1 + x.value2)
self.assertDatasetProduces(
dataset,
expected_output=[3 * x for x in range(10)],
)
@combinations.generate(_test_combinations())
def testMapMaskedTensor(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: MaskedTensor(mask=True, value=x))
dataset = apply_map(dataset, lambda x: 3 * x.value)
self.assertDatasetProduces(
dataset,
expected_output=[3 * x for x in range(10)],
)
@combinations.generate(_test_combinations())
def testMapDataclassWithInputAndOutput(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(MyDataclass(value1=1, value2=2))
dataset = apply_map(dataset, lambda x: (x.value1 * 5, x.value2))
dataset = apply_map(
dataset, lambda x, y: MaskedTensor(mask=True, value=x + y)
)
dataset = apply_map(
dataset, lambda m: NestedMaskedTensor(mask=False, value=m)
)
self.assertDatasetProduces(
dataset,
expected_output=[
NestedMaskedTensor(
mask=False, value=MaskedTensor(mask=True, value=7)
)
],
)
@combinations.generate(_test_combinations())
def testMapListOfDataclassObjects(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
# Creates a list of dataclass objects.
dataset = apply_map(
dataset,
lambda x: [ # pylint: disable=g-long-lambda
MyDataclass(value1=x, value2=1),
MyDataclass(value1=2, value2=2 * x),
],
)
# Takes a list of dataclass objects as input.
dataset = apply_map(dataset, lambda *x: x[0].value1 + x[1].value2)
self.assertDatasetProduces(
dataset,
expected_output=[3 * x for x in range(10)],
)
@combinations.generate(_test_combinations())
def testMapDictOfDataclassValues(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
# Creates a dict of {str -> dataclass}.
dataset = apply_map(
dataset,
lambda x: { # pylint: disable=g-long-lambda
"a": MyDataclass(value1=x, value2=1),
"b": MyDataclass(value1=2, value2=2 * x),
},
)
# Takes a dict of dataclass values as input.
dataset = apply_map(dataset, lambda x: x["a"].value1 + x["b"].value2)
self.assertDatasetProduces(
dataset,
expected_output=[3 * x for x in range(10)],
)
@combinations.generate(_test_combinations())
def testMapNestedMaskedTensorWithDataclassInput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: MaskedTensor(mask=True, value=x))
dataset = apply_map(
dataset,
# Takes a MaskedTensor as input.
lambda x: NestedMaskedTensor(mask=False, value=x),
)
dataset = apply_map(dataset, lambda x: 5 * x.value.value)
self.assertDatasetProduces(
dataset,
expected_output=[5 * x for x in range(10)],
)
@combinations.generate(_test_combinations())
def testMapNestedMaskedTensorWithDataclassOutput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(
dataset,
lambda x: NestedMaskedTensor( # pylint: disable=g-long-lambda
mask=False, value=MaskedTensor(mask=True, value=x)
),
)
# Return a MaskedTensor as the return value.
dataset = apply_map(dataset, lambda x: x.value)
dataset = apply_map(dataset, lambda x: 7 * x.value)
self.assertDatasetProduces(
dataset,
expected_output=[7 * x for x in range(10)],
)
@combinations.generate(_test_combinations())
def testMapNamedtuple(self, apply_map):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(10)
images = apply_map(labels, lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = collections.namedtuple("Example", ["label", "image"])
dataset_namedtuple = apply_map(dataset_tuple, example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = apply_map(dataset_tuple, preprocess_tuple)
dataset_namedtuple = apply_map(dataset_namedtuple, preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(10):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
@combinations.generate(_test_combinations())
def testMapAttrs(self, apply_map):
if attr is None:
self.skipTest("attr module is not available.")
# construct dataset of tuples
labels = dataset_ops.Dataset.range(10)
images = apply_map(labels, lambda l: -l)
dataset = dataset_ops.Dataset.zip((labels, images))
@attr.s(cmp=True)
class Example:
label = attr.ib()
image = attr.ib()
dataset = apply_map(dataset, Example)
def preprocess(example):
example.image = 2 * example.image
return example
dataset = apply_map(dataset, preprocess)
get_next = self.getNext(dataset)
for i in range(10):
data = self.evaluate(get_next())
self.assertEqual(data, Example(i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testUseStepContainerInMap(self, apply_map):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(dataset,
lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
@combinations.generate(_test_combinations())
def testCaseAndCondInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return cond.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_case.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(row)
return apply_map(dataset, lambda x: control_map_fn(x, num))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), divide),
]
return control_flow_case.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensors(row)
return apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseAndCondInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return cond.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_case.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testNestedListMapDataset(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2]).repeat(10)
dataset = apply_map(dataset, lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(buffer_size=[1, 2, 3, 4])))
def testPrefetch(self, apply_map, buffer_size):
# We will use this event to test that `_map_py_func()` has been invoked a
# certain number of times (6 times, to be exact) after consuming fewer
# elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
# We can indirectly observe that varying the buffer size has the intended
# effect by observing when `ev` is set (on the 6th invocation of
# `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least one element
# to start the prefetching.
dataset = dataset_ops.Dataset.range(100)
dataset = apply_map(dataset, _map_fn).prefetch(buffer_size)
get_next = self.getNext(dataset)
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testReturnList(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testMultiOutputPyFunc(self, apply_map):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparse(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparseChain(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInference(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=True)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual((32, 3), dataset.element_spec.shape)
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInferencePartial(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=False)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual([None, 3], dataset.element_spec.shape.as_list())
@combinations.generate(_test_combinations())
def testTensorArray(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testTensorArrayChain(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testRagged(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5)
dataset = apply_map(dataset, _ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
@combinations.generate(_test_combinations())
def testRaggedChain(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _ragged)
dataset = apply_map(dataset, _concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
@combinations.generate(_test_combinations_with_mode("graph"))
def testParallelMapOutOfRangeError(self, apply_map):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105)
dataset = apply_map(
dataset,
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
# When the map function in `MapDataset` raises an OutOfRange error, TF1 and
# TF2 behave differently. TF1 raises an OutOfRangeError to signal the end of
# sequence while TF2 raises an InvalidArgumentError. This behavior is
# controlled by the `preserve_cardinality` argument of `map` transformation
# which is set to `True` for TF2 and `False` for TF1, which is for backward
# compatibility.
if tf2.enabled():
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
else:
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testConstantOutput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
@combinations.generate(test_base.graph_only_combinations())
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
def _check_warning(caught_warnings, expected_result):
found_warning = False
for warning in caught_warnings:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertEqual(found_warning, expected_result)
# map_fun doesn't use seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
_check_warning(w, False)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
_check_warning(w, True)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
_check_warning(w, False)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(
lambda x: random_ops.random_shuffle(x, seed=37))
_check_warning(w, False)
@combinations.generate(_test_combinations())
def testNestedDatasetMap(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
dataset = apply_map(dataset, dataset_ops.Dataset.from_tensor_slices)
dataset = apply_map(dataset, lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
@combinations.generate(_test_combinations())
def testReturnValueError(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegex(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\)"):
_ = apply_map(dataset, lambda x: Foo)
@combinations.generate(test_base.default_test_combinations())
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "Type mismatch"))
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testNoInterOpParallelism(self, apply_map, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = apply_map(dataset, _map_fn)
dataset._variant_tensor.op._set_attr("use_inter_op_parallelism",
attr_value_pb2.AttrValue(b=False))
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
@combinations.generate(
combinations.times(_test_combinations(), _short_circuit_test_cases(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuit(self, apply_map, structure, fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat()
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuitCapturedInput(self, apply_map, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat()
dataset = apply_map(
dataset, lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager", "graph"],
num_parallel_calls=[None, 12]))
def testPreserveCardinality(self, num_parallel_calls):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.Dataset.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
@combinations.generate(_test_combinations_with_mode("graph"))
def testCollectionCopy(self, apply_map):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
_ = apply_map(dataset, func)
@combinations.generate(
combinations.times(
_test_combinations_with_mode_v1("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testMapCancellation(self, apply_map, num_parallel_calls):
# Checks that a cancellation of is threaded through to map transformation.
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
def fn(_):
return queue.dequeue()
dataset = dataset_ops.Dataset.range(1)
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset, requires_initialization=True)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
# TODO(b/126553094): map doesnt work with variable defined inside function in
# eager mode, possible Graph tensors leak out of the function building context
# from function graph in eager mode as variables are created in init_scope.
@combinations.generate(test_base.graph_only_combinations())
def testCreateVariableInsideFunctionWithGetter(self):
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return counter_var.assign_add(1)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
if hasattr(dataset, "map_with_legacy_function"):
# NOTE: In the legacy function, resource is captured by value.
with self.assertRaisesWithPredicateMatch(
AttributeError, ".*Tensor.* object has no attribute 'assign_add'"
):
dataset.map_with_legacy_function(func)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertEqual(i + 1, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureVariable(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
options = options_lib.Options()
options.experimental_optimization.inject_prefetch = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureUninitializedVariableError(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureConstantsWithConflictingDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testReferenceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = variable_v1.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variable_v1.VariableV1(5.0)
def func(_):
nonlocal a, b
return math_ops.add(a, b)
# NOTE: Use the legacy function implementation as eager function will
# convert RefVariables to ResourceVariables.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testResourceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
with ops.device("/device:CPU:0"):
a_var = variable_scope.get_variable(
"a", (), dtypes.int32, use_resource=True)
a_var = math_ops.add(a_var, 1)
with ops.device("/device:CPU:1"):
b_var = variable_scope.get_variable(
"b", (), dtypes.int32, use_resource=True)
return math_ops.add(a_var, b_var)
g = ops.Graph()
with self.session(config=config, graph=g):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(
local_determinism=[None, True, False],
global_determinism=[True, False])))
def testDeterminismConfiguration(self, apply_map, local_determinism,
global_determinism):
expect_determinism = local_determinism or (local_determinism is None and
global_determinism)
elements = list(range(1000))
def dataset_fn(delay_ms):
def sleep(x):
time.sleep(delay_ms / 1000)
return x
def map_function(x):
if math_ops.equal(x, 0):
return script_ops.py_func(sleep, [x], x.dtype)
else:
return x
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = apply_map(
dataset,
map_function,
num_parallel_calls=2,
deterministic=local_determinism)
opts = options_lib.Options()
opts.deterministic = global_determinism
dataset = dataset.with_options(opts)
return dataset
self.checkDeterminism(
dataset_fn, expect_determinism, expected_elements=elements)
@combinations.generate(_test_combinations())
def testNoneComponent(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors((42, None))
def map_function(x, y):
if y is None:
return x / 2
return x
dataset = apply_map(dataset, map_function)
self.assertDatasetProduces(dataset, expected_output=[21])
@combinations.generate(test_base.eager_only_combinations())
def testCheckpointLargeBuffer(self):
if (pywrap_sanitizers.is_asan_enabled() or
pywrap_sanitizers.is_tsan_enabled() or
pywrap_sanitizers.is_msan_enabled()):
self.skipTest("Skip to avoid OOM when using sanitizers.")
dataset = dataset_ops.Dataset.range(10).batch(2)
dataset = dataset.map(
# Create tensors of size 512M.
lambda seed: stateless_random_ops.stateless_random_uniform(
(128, 1024, 1024), seed, dtype=dtypes.float32
)
)
# Set parallelism to 5 to exceed the 2GB protobuf limit
dataset = dataset.map(lambda x: x * 2, num_parallel_calls=5)
iterator = iter(dataset)
next(iterator) # Request an element to fill the parallel map buffer
time.sleep(1) # Give buffers some time to fill
ckpt = trackable_utils.Checkpoint(iterator=iterator)
manager = checkpoint_management.CheckpointManager(
ckpt, self.get_temp_dir(), max_to_keep=1)
manager.save()
del dataset
del iterator
manager.restore_or_initialize()
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(
use_unbounded_threadpool=[True, False])))
def testAutotuneUseUnboundedThreadpool(self, use_unbounded_threadpool):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.map(
lambda x: x * 2,
num_parallel_calls=dataset_ops.AUTOTUNE,
use_unbounded_threadpool=use_unbounded_threadpool,
deterministic=True,
name="map")
self.assertDatasetProduces(dataset, [x * 2 for x in range(100)])
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 1])))
def testName(self, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(21).map(
lambda x: x * 2, num_parallel_calls=num_parallel_calls, name="map")
self.assertDatasetProduces(dataset, [42])
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 1])))
def testStatusMessage(self, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(21).map(
lambda x: x // 0, num_parallel_calls=num_parallel_calls, name="map")
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r".*Error in user-defined function passed to .* transformation with "
r"iterator: Iterator::Root::.*"):
self.evaluate(get_next())
| MapTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 45592,
"end": 47501
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
username: str,
replication_method: str,
password: Optional[str] = None,
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Source for Scaffold Java Jdbc.
Documentation for this source is no longer available.
Args:
name (str): The name of the destination.
host (str): Hostname of the database.
port (int): Port of the database.
database (str): Name of the database.
username (str): Username to use to access the database.
password (Optional[str]): Password associated with the username.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3)
replication_method (str): Replication method to use for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
self.replication_method = check.str_param(replication_method, "replication_method")
super().__init__("Scaffold Java Jdbc", name)
| ScaffoldJavaJdbcSource |
python | walkccc__LeetCode | solutions/1842. Next Palindrome Using Same Digits/1842.py | {
"start": 0,
"end": 1052
} | class ____:
def nextPalindrome(self, num: str) -> str:
def nextPermutation(nums: list[int]) -> bool:
n = len(nums)
# From the back to the front, find the first num < nums[i + 1].
i = n - 2
while i >= 0:
if nums[i] < nums[i + 1]:
break
i -= 1
if i < 0:
return False
# From the back to the front, find the first num > nums[i] and swap it
# with nums[i].
for j in range(n - 1, i, -1):
if nums[j] > nums[i]:
nums[i], nums[j] = nums[j], nums[i]
break
def reverse(nums, l, r):
while l < r:
nums[l], nums[r] = nums[r], nums[l]
l += 1
r -= 1
# Reverse nums[i + 1..n - 1].
reverse(nums, i + 1, len(nums) - 1)
return True
n = len(num)
arr = [int(num[i]) for i in range(len(num) // 2)]
if not nextPermutation(arr):
return ''
s = ''.join([chr(ord('0') + a) for a in arr])
if n % 2 == 1:
return s + num[n // 2] + s[::-1]
return s + s[::-1]
| Solution |
python | redis__redis-py | tests/test_asyncio/test_lock.py | {
"start": 148,
"end": 9923
} | class ____:
@pytest_asyncio.fixture()
async def r_decoded(self, create_redis):
redis = await create_redis(decode_responses=True)
yield redis
await redis.flushall()
def get_lock(self, redis, *args, **kwargs):
kwargs["lock_class"] = Lock
return redis.lock(*args, **kwargs)
async def test_lock(self, r):
lock = self.get_lock(r, "foo")
assert await lock.acquire(blocking=False)
assert await r.get("foo") == lock.local.token
assert await r.ttl("foo") == -1
await lock.release()
assert await r.get("foo") is None
async def test_lock_token(self, r):
lock = self.get_lock(r, "foo")
await self._test_lock_token(r, lock)
async def test_lock_token_thread_local_false(self, r):
lock = self.get_lock(r, "foo", thread_local=False)
await self._test_lock_token(r, lock)
async def _test_lock_token(self, r, lock):
assert await lock.acquire(blocking=False, token="test")
assert await r.get("foo") == b"test"
assert lock.local.token == b"test"
assert await r.ttl("foo") == -1
await lock.release()
assert await r.get("foo") is None
assert lock.local.token is None
async def test_locked(self, r):
lock = self.get_lock(r, "foo")
assert await lock.locked() is False
await lock.acquire(blocking=False)
assert await lock.locked() is True
await lock.release()
assert await lock.locked() is False
async def _test_owned(self, client):
lock = self.get_lock(client, "foo")
assert await lock.owned() is False
await lock.acquire(blocking=False)
assert await lock.owned() is True
await lock.release()
assert await lock.owned() is False
lock2 = self.get_lock(client, "foo")
assert await lock.owned() is False
assert await lock2.owned() is False
await lock2.acquire(blocking=False)
assert await lock.owned() is False
assert await lock2.owned() is True
await lock2.release()
assert await lock.owned() is False
assert await lock2.owned() is False
async def test_owned(self, r):
await self._test_owned(r)
async def test_owned_with_decoded_responses(self, r_decoded):
await self._test_owned(r_decoded)
async def test_competing_locks(self, r):
lock1 = self.get_lock(r, "foo")
lock2 = self.get_lock(r, "foo")
assert await lock1.acquire(blocking=False)
assert not await lock2.acquire(blocking=False)
await lock1.release()
assert await lock2.acquire(blocking=False)
assert not await lock1.acquire(blocking=False)
await lock2.release()
async def test_timeout(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert await lock.acquire(blocking=False)
assert 8 < (await r.ttl("foo")) <= 10
await lock.release()
async def test_float_timeout(self, r):
lock = self.get_lock(r, "foo", timeout=9.5)
assert await lock.acquire(blocking=False)
assert 8 < (await r.pttl("foo")) <= 9500
await lock.release()
async def test_blocking(self, r):
blocking = False
lock = self.get_lock(r, "foo", blocking=blocking)
assert not lock.blocking
lock_2 = self.get_lock(r, "foo")
assert lock_2.blocking
async def test_blocking_timeout(self, r):
lock1 = self.get_lock(r, "foo")
assert await lock1.acquire(blocking=False)
bt = 0.2
sleep = 0.05
lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
start = asyncio.get_running_loop().time()
assert not await lock2.acquire()
# The elapsed duration should be less than the total blocking_timeout
assert bt >= (asyncio.get_running_loop().time() - start) > bt - sleep
await lock1.release()
async def test_context_manager(self, r):
# blocking_timeout prevents a deadlock if the lock can't be acquired
# for some reason
async with self.get_lock(r, "foo", blocking_timeout=0.2) as lock:
assert await r.get("foo") == lock.local.token
assert await r.get("foo") is None
async def test_context_manager_raises_when_locked_not_acquired(self, r):
await r.set("foo", "bar")
with pytest.raises(LockError):
async with self.get_lock(r, "foo", blocking_timeout=0.1):
pass
async def test_context_manager_not_raise_on_release_lock_not_owned_error(self, r):
try:
async with self.get_lock(
r, "foo", timeout=0.1, raise_on_release_error=False
):
await asyncio.sleep(0.15)
except LockNotOwnedError:
pytest.fail("LockNotOwnedError should not have been raised")
with pytest.raises(LockNotOwnedError):
async with self.get_lock(
r, "foo", timeout=0.1, raise_on_release_error=True
):
await asyncio.sleep(0.15)
async def test_context_manager_not_raise_on_release_lock_error(self, r):
try:
async with self.get_lock(
r, "foo", timeout=0.1, raise_on_release_error=False
) as lock:
await lock.release()
except LockError:
pytest.fail("LockError should not have been raised")
with pytest.raises(LockError):
async with self.get_lock(
r, "foo", timeout=0.1, raise_on_release_error=True
) as lock:
await lock.release()
async def test_high_sleep_small_blocking_timeout(self, r):
lock1 = self.get_lock(r, "foo")
assert await lock1.acquire(blocking=False)
sleep = 60
bt = 1
lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
start = asyncio.get_running_loop().time()
assert not await lock2.acquire()
# the elapsed timed is less than the blocking_timeout as the lock is
# unattainable given the sleep/blocking_timeout configuration
assert bt > (asyncio.get_running_loop().time() - start)
await lock1.release()
async def test_releasing_unlocked_lock_raises_error(self, r):
lock = self.get_lock(r, "foo")
with pytest.raises(LockError):
await lock.release()
async def test_releasing_lock_no_longer_owned_raises_error(self, r):
lock = self.get_lock(r, "foo")
await lock.acquire(blocking=False)
# manually change the token
await r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
await lock.release()
# even though we errored, the token is still cleared
assert lock.local.token is None
async def test_extend_lock(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert await lock.acquire(blocking=False)
assert 8000 < (await r.pttl("foo")) <= 10000
assert await lock.extend(10)
assert 16000 < (await r.pttl("foo")) <= 20000
await lock.release()
async def test_extend_lock_replace_ttl(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert await lock.acquire(blocking=False)
assert 8000 < (await r.pttl("foo")) <= 10000
assert await lock.extend(10, replace_ttl=True)
assert 8000 < (await r.pttl("foo")) <= 10000
await lock.release()
async def test_extend_lock_float(self, r):
lock = self.get_lock(r, "foo", timeout=10.5)
assert await lock.acquire(blocking=False)
assert 10400 < (await r.pttl("foo")) <= 10500
old_ttl = await r.pttl("foo")
assert await lock.extend(10.5)
assert old_ttl + 10400 < (await r.pttl("foo")) <= old_ttl + 10500
await lock.release()
async def test_extending_unlocked_lock_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
with pytest.raises(LockError):
await lock.extend(10)
async def test_extending_lock_with_no_timeout_raises_error(self, r):
lock = self.get_lock(r, "foo")
assert await lock.acquire(blocking=False)
with pytest.raises(LockError):
await lock.extend(10)
await lock.release()
async def test_extending_lock_no_longer_owned_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert await lock.acquire(blocking=False)
await r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
await lock.extend(10)
async def test_reacquire_lock(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert await lock.acquire(blocking=False)
assert await r.pexpire("foo", 5000)
assert await r.pttl("foo") <= 5000
assert await lock.reacquire()
assert 8000 < (await r.pttl("foo")) <= 10000
await lock.release()
async def test_reacquiring_unlocked_lock_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
with pytest.raises(LockError):
await lock.reacquire()
async def test_reacquiring_lock_with_no_timeout_raises_error(self, r):
lock = self.get_lock(r, "foo")
assert await lock.acquire(blocking=False)
with pytest.raises(LockError):
await lock.reacquire()
await lock.release()
async def test_reacquiring_lock_no_longer_owned_raises_error(self, r):
lock = self.get_lock(r, "foo", timeout=10)
assert await lock.acquire(blocking=False)
await r.set("foo", "a")
with pytest.raises(LockNotOwnedError):
await lock.reacquire()
@pytest.mark.onlynoncluster
| TestLock |
python | protocolbuffers__protobuf | python/google/protobuf/internal/timestamp_test.py | {
"start": 591,
"end": 4287
} | class ____(unittest.TestCase):
def test_timestamp_integer_conversion(self):
self.assertEqual(1, timestamp.to_nanoseconds(timestamp.from_nanoseconds(1)))
self.assertEqual(-1, timestamp.to_seconds(timestamp.from_seconds(-1)))
self.assertEqual(
123, timestamp.to_milliseconds(timestamp.from_milliseconds(123))
)
self.assertEqual(
321, timestamp.to_microseconds(timestamp.from_microseconds(321))
)
def test_timestamp_current(self):
# It is not easy to check with current time. For test coverage only.
self.assertNotEqual(8 * 3600, timestamp.from_current_time().seconds)
def test_timestamp_json(self):
def check_timestamp(ts, text):
self.assertEqual(text, timestamp.to_json_string(ts))
parsed_ts = timestamp.from_json_string(text)
self.assertEqual(ts, parsed_ts)
message = timestamp_pb2.Timestamp()
message.seconds = 0
message.nanos = 0
check_timestamp(message, '1970-01-01T00:00:00Z')
message.nanos = 10000000
check_timestamp(message, '1970-01-01T00:00:00.010Z')
message.nanos = 10000
check_timestamp(message, '1970-01-01T00:00:00.000010Z')
def test_timestamp_datetime(self):
naive_utc_epoch = datetime.datetime(1970, 1, 1)
message = well_known_types_test_pb2.WKTMessage()
message.optional_timestamp = naive_utc_epoch
self.assertEqual(0, message.optional_timestamp.seconds) # pytype: disable=attribute-error
self.assertEqual(0, message.optional_timestamp.nanos) # pytype: disable=attribute-error
self.assertEqual(
naive_utc_epoch, timestamp.to_datetime(message.optional_timestamp) # pytype: disable=wrong-arg-types
)
def test_timstamp_construction(self):
message = well_known_types_test_pb2.WKTMessage(
optional_timestamp=datetime.datetime.today()
)
def test_repeated_timestamp_construction(self):
message = well_known_types_test_pb2.WKTMessage(
repeated_ts=[
datetime.datetime(2025, 1, 1),
datetime.datetime(1970, 1, 1),
timestamp_pb2.Timestamp(),
]
)
self.assertEqual(len(message.repeated_ts), 3)
self.assertEqual(
datetime.datetime(2025, 1, 1),
timestamp.to_datetime((message.repeated_ts[0])),
)
self.assertEqual(
datetime.datetime(1970, 1, 1),
timestamp.to_datetime((message.repeated_ts[1])),
)
self.assertEqual(timestamp_pb2.Timestamp(), message.repeated_ts[2])
def test_timestamp_sub_annotation(self):
t1 = timestamp_pb2.Timestamp()
t2 = timestamp_pb2.Timestamp()
dt = datetime.datetime.now()
td = datetime.timedelta(hours=0)
msg = well_known_types_test_pb2.WKTMessage(optional_duration=td)
# Timestamp - datetime
self.assertEqual(t1 - dt, t2 - dt)
# Timestamp - Timestamp
self.assertEqual(t1 - t2, t2 - t1)
# datetime - Timestamp
self.assertEqual(dt - t1, dt - t2)
# Timestamp - timedelta and Timestamp - Duration
self.assertEqual(t1 - td, t2 - msg.optional_duration)
def test_timestamp_add_annotation(self):
ts = timestamp_pb2.Timestamp()
td = datetime.timedelta(hours=0)
msg = well_known_types_test_pb2.WKTMessage(optional_duration=td)
# Timestamp + timedelta and timedelta + Timestamp
self.assertEqual(ts + td, td + ts)
# Timestamp + Duration and Duration + Timestamp
self.assertEqual(ts + msg.optional_duration, msg.optional_duration + ts)
def test_assign_duration_to_timestamp(self):
message = well_known_types_test_pb2.WKTMessage()
with self.assertRaises((TypeError)):
message.optional_timestamp = datetime.timedelta(microseconds=123)
if __name__ == '__main__':
unittest.main()
| TimestampTest |
python | tensorflow__tensorflow | tensorflow/python/ops/variable_scope.py | {
"start": 56209,
"end": 58988
} | class ____(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
if scope_name is None:
for k in self.variable_scopes_count:
self.variable_scopes_count[k] = 0
else:
startswith_check = scope_name + "/"
startswith_len = len(startswith_check)
for k in self.variable_scopes_count:
if k[:startswith_len] == startswith_check:
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope.
@compatibility(TF2)
Although it is a legacy `compat.v1` api,
`tf.compat.v1.get_variable` is compatible with eager
execution and `tf.function`
However, to maintain variable-scope based variable reuse
you will need to combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`. (Though
it will behave as if reuse is always set to `tf.compat.v1.AUTO_REUSE`.)
See the
[migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
for more info.
The TF2 equivalent, if you are just trying to track
variable name prefixes and not control `get_variable`-based variable reuse,
would be to use `tf.name_scope` and capture the output of opening the
scope (which represents the current name prefix).
For example:
```python
x = tf.name_scope('foo') as current_scope:
...
```
@end_compatibility
"""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
| _VariableScopeStore |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/checks.py | {
"start": 934,
"end": 2088
} | class ____:
errors: list[str] = field(default_factory=list)
messages: list[str] = field(default_factory=list)
def check_dagster_cloud_yaml(yaml_path: pathlib.Path) -> CheckResult:
result = CheckResult()
if not yaml_path.exists():
result.errors.append(f"No such file {yaml_path}")
return result
yaml_text = yaml_path.read_text()
if not yaml_text.strip():
result.errors.append(f"Unexpected blank file {yaml_path}")
return result
try:
parsed = load_dagster_cloud_yaml(yaml_path.read_text())
except pydantic.ValidationError as err:
for error in get_validation_errors(err):
result.errors.append(error)
return result
for location in parsed.locations:
if location.build and location.build.directory:
build_path = yaml_path.parent / location.build.directory
if not build_path.is_dir():
result.errors.append(
f"Build directory {build_path} not found for location"
f" {location.location_name} at {build_path.absolute()}"
)
return result
| CheckResult |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 54626,
"end": 54870
} | class ____(Enum):
NONE = "none"
UNKNOWN = "unknown"
CLIENTSIDE = "clientside"
SENTINEL_DEFAULT = "sentinel_default"
SERVERSIDE = "serverside"
IDENTITY = "identity"
SEQUENCE = "sequence"
| _SentinelDefaultCharacterization |
python | gevent__gevent | src/greentest/3.13/test_socket.py | {
"start": 232240,
"end": 235111
} | class ____(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
socket_helper.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(os_helper.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if os_helper.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(os_helper.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipIf(sys.platform in ('linux', 'android'),
'Linux behavior is tested by TestLinuxAbstractNamespace')
def testEmptyAddress(self):
# Test that binding empty address fails.
self.assertRaises(OSError, self.sock.bind, "")
| TestUnixDomain |
python | qdrant__qdrant-client | tests/congruence_tests/test_group_search.py | {
"start": 589,
"end": 13838
} | class ____:
__test__ = False
def __init__(self):
self.query_text = np.random.random(text_vector_size).tolist()
self.query_image = np.random.random(image_vector_size).tolist()
self.query_code = np.random.random(code_vector_size).tolist()
self.group_by = "rand_digit"
self.group_size = 1
self.limit = 10
def group_search(
self,
client: QdrantBase,
query_vector: Union[
types.NumpyArray,
Sequence[float],
tuple[str, list[float]],
types.NamedVector,
],
) -> models.GroupsResult:
using = None
if isinstance(query_vector, tuple):
using, query_vector = query_vector
return client.query_points_groups(
collection_name=COLLECTION_NAME,
query=query_vector,
using=using,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
)
def group_search_text(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
)
def group_search_text_single(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
query=self.query_text,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
)
def group_search_image(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="image",
query=self.query_image,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
)
def group_search_image_with_lookup(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
query=self.query_image,
using="image",
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
with_lookup=LOOKUP_COLLECTION_NAME,
)
def group_search_image_with_lookup_2(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="image",
query=self.query_image,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
with_lookup=models.WithLookup(
collection=LOOKUP_COLLECTION_NAME,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
with_vectors=["image"],
),
)
def group_search_code(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="code",
query=self.query_code,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
group_by=self.group_by,
limit=self.limit,
group_size=self.group_size,
)
def group_search_score_threshold(self, client: QdrantBase) -> models.GroupsResult:
res1 = client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
limit=self.limit,
group_by=self.group_by,
score_threshold=0.9,
group_size=self.group_size,
)
res2 = client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
limit=self.limit,
group_by=self.group_by,
score_threshold=0.95,
group_size=self.group_size,
)
res3 = client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
limit=self.limit,
group_by=self.group_by,
score_threshold=0.1,
group_size=self.group_size,
)
return models.GroupsResult(groups=res1.groups + res2.groups + res3.groups)
def group_search_text_select_payload(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
with_payload=["text_array", "nested.id"],
limit=self.limit,
group_by=self.group_by,
group_size=self.group_size,
)
def group_search_payload_exclude(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
with_payload=models.PayloadSelectorExclude(
exclude=["text_array", "nested.id", "city.geo", "rand_number"]
),
limit=self.limit,
group_by=self.group_by,
group_size=self.group_size,
)
def group_search_image_select_vector(self, client: QdrantBase) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="image",
query=self.query_image,
with_payload=False,
with_vectors=["image", "code"],
limit=self.limit,
group_by=self.group_by,
group_size=self.group_size,
)
def filter_group_search_text(
self, client: QdrantBase, query_filter: models.Filter
) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
using="text",
query=self.query_text,
query_filter=query_filter,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
limit=self.limit,
group_by=self.group_by,
group_size=self.group_size,
)
def filter_group_search_text_single(
self, client: QdrantBase, query_filter: models.Filter
) -> models.GroupsResult:
return client.query_points_groups(
collection_name=COLLECTION_NAME,
query=self.query_text,
query_filter=query_filter,
with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]),
with_vectors=True,
limit=self.limit,
group_by=self.group_by,
group_size=self.group_size,
)
def group_by_keys():
return ["id", "rand_digit", "two_words", "city.name", "maybe", "maybe_null"]
def test_group_search_types():
fixture_points = generate_fixtures(vectors_sizes=50)
vectors_config = models.VectorParams(size=50, distance=models.Distance.EUCLID)
searcher = TestGroupSearcher()
local_client = init_local()
init_client(local_client, fixture_points, vectors_config=vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, vectors_config=vectors_config)
query_vector_np = np.random.random(text_vector_size)
compare_client_results(
local_client,
remote_client,
searcher.group_search,
query_vector=query_vector_np,
)
query_vector_list = query_vector_np.tolist()
compare_client_results(
local_client, remote_client, searcher.group_search, query_vector=query_vector_list
)
delete_fixture_collection(local_client)
delete_fixture_collection(remote_client)
def test_simple_group_search():
fixture_points = generate_fixtures()
lookup_points = generate_fixtures(
num=7,
random_ids=False, # Less that group ids to test the empty lookups
)
searcher = TestGroupSearcher()
local_client = init_local()
init_client(local_client, fixture_points)
init_client(local_client, lookup_points, collection_name=LOOKUP_COLLECTION_NAME)
remote_client = init_remote()
init_client(remote_client, fixture_points)
init_client(remote_client, lookup_points, collection_name=LOOKUP_COLLECTION_NAME)
searcher.group_size = 1
searcher.limit = 2
for key in group_by_keys():
searcher.group_by = key
compare_client_results(local_client, remote_client, searcher.group_search_text)
searcher.group_size = 3
compare_client_results(local_client, remote_client, searcher.group_search_text)
compare_client_results(local_client, remote_client, searcher.group_search_image)
compare_client_results(local_client, remote_client, searcher.group_search_code)
compare_client_results(local_client, remote_client, searcher.group_search_image_with_lookup)
compare_client_results(local_client, remote_client, searcher.group_search_image_with_lookup_2)
compare_client_results(local_client, remote_client, searcher.group_search_score_threshold)
compare_client_results(local_client, remote_client, searcher.group_search_text_select_payload)
compare_client_results(local_client, remote_client, searcher.group_search_image_select_vector)
compare_client_results(local_client, remote_client, searcher.group_search_payload_exclude)
for i in range(100):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client,
remote_client,
searcher.filter_group_search_text,
query_filter=query_filter,
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
def test_single_vector():
fixture_points = generate_fixtures(num=200, vectors_sizes=text_vector_size)
searcher = TestGroupSearcher()
vectors_config = models.VectorParams(
size=text_vector_size,
distance=models.Distance.DOT,
)
local_client = init_local()
init_client(local_client, fixture_points, vectors_config=vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, vectors_config=vectors_config)
for group_size in (1, 5):
searcher.group_size = group_size
for i in range(100):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client,
remote_client,
searcher.filter_group_search_text_single,
query_filter=query_filter,
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
def test_search_with_persistence():
import tempfile
fixture_points = generate_fixtures()
searcher = TestGroupSearcher()
with tempfile.TemporaryDirectory() as tmpdir:
local_client = init_local(tmpdir)
init_client(local_client, fixture_points)
payload_update_filter = one_random_filter_please()
local_client.set_payload(COLLECTION_NAME, {"test": f"test"}, payload_update_filter)
del local_client
local_client_2 = init_local(tmpdir)
remote_client = init_remote()
init_client(remote_client, fixture_points)
remote_client.set_payload(COLLECTION_NAME, {"test": f"test"}, payload_update_filter)
payload_update_filter = one_random_filter_please()
local_client_2.set_payload(COLLECTION_NAME, {"test": "test2"}, payload_update_filter)
remote_client.set_payload(COLLECTION_NAME, {"test": "test2"}, payload_update_filter)
for i in range(10):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client_2,
remote_client,
searcher.filter_group_search_text,
query_filter=query_filter,
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
| TestGroupSearcher |
python | bokeh__bokeh | src/bokeh/core/property/vectorization.py | {
"start": 1908,
"end": 3097
} | class ____(Generic[T], Serializable):
value: T
transform: NotRequired[Transform] = Unspecified
units: NotRequired[str] = Unspecified
def to_serializable(self, serializer: Serializer) -> AnyRep:
return serializer.encode_struct(type="value", value=self.value, transform=self.transform, units=self.units)
@classmethod
def from_serializable(cls, rep: dict[str, AnyRep], deserializer: Deserializer) -> Value[Any]:
if "value" not in rep:
deserializer.error("expected 'value' field")
value = deserializer.decode(rep["value"])
transform = deserializer.decode(rep["transform"]) if "transform" in rep else Unspecified
units = deserializer.decode(rep["units"]) if "units" in rep else Unspecified
return Value(value, transform, units)
def __getitem__(self, key: str) -> Any:
if key == "value":
return self.value
elif key == "transform" and self.transform is not Unspecified:
return self.transform
elif key == "units" and self.units is not Unspecified:
return self.units
else:
raise KeyError(f"key '{key}' not found")
@dataclass
| Value |
python | PrefectHQ__prefect | src/prefect/logging/highlighters.py | {
"start": 413,
"end": 730
} | class ____(RegexHighlighter):
"""Apply style to urls."""
base_style = "url."
highlights: list[str] = [
r"(?P<web_url>(https|http|ws|wss):\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)",
r"(?P<local_url>(file):\/\/[0-9a-zA-Z\$\-\_\+\!`\(\)\,\.\?\/\;\:\&\=\%\#]*)",
]
| UrlHighlighter |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/logging/publish_connector_lifecycle.py | {
"start": 231,
"end": 720
} | class ____(str, Enum):
IN_PROGRESS = "in_progress"
SUCCESS = "success"
FAILED = "failed"
def __str__(self) -> str:
# convert to upper case
return self.value.replace("_", " ").upper()
def to_emoji(self) -> str:
if self == StageStatus.IN_PROGRESS:
return "🟡"
elif self == StageStatus.SUCCESS:
return "🟢"
elif self == StageStatus.FAILED:
return "🔴"
else:
return ""
| StageStatus |
python | PyCQA__pylint | pylint/extensions/mccabe.py | {
"start": 1490,
"end": 5821
} | class ____(Mccabe_PathGraphingAstVisitor): # type: ignore[misc]
def __init__(self) -> None:
super().__init__()
self._bottom_counter = 0
self.graph: PathGraph | None = None
def default(self, node: nodes.NodeNG, *args: Any) -> None:
for child in node.get_children():
self.dispatch(child, *args)
def dispatch(self, node: nodes.NodeNG, *args: Any) -> Any:
self.node = node
klass = node.__class__
meth = self._cache.get(klass)
if meth is None:
class_name = klass.__name__
meth = getattr(self.visitor, "visit" + class_name, self.default)
self._cache[klass] = meth
return meth(node, *args)
def visitFunctionDef(self, node: nodes.FunctionDef) -> None:
if self.graph is not None:
# closure
pathnode = self._append_node(node)
self.tail = pathnode
self.dispatch_list(node.body)
bottom = f"{self._bottom_counter}"
self._bottom_counter += 1
self.graph.connect(self.tail, bottom)
self.graph.connect(node, bottom)
self.tail = bottom
else:
self.graph = PathGraph(node)
self.tail = node
self.dispatch_list(node.body)
self.graphs[f"{self.classname}{node.name}"] = self.graph
self.reset()
visitAsyncFunctionDef = visitFunctionDef
def visitSimpleStatement(self, node: _StatementNodes) -> None:
self._append_node(node)
visitAssert = visitAssign = visitAugAssign = visitDelete = visitRaise = (
visitYield
) = visitImport = visitCall = visitSubscript = visitPass = visitContinue = (
visitBreak
) = visitGlobal = visitReturn = visitExpr = visitAwait = visitSimpleStatement
def visitWith(self, node: nodes.With) -> None:
self._append_node(node)
self.dispatch_list(node.body)
visitAsyncWith = visitWith
def visitMatch(self, node: nodes.Match) -> None:
self._subgraph(node, f"match_{id(node)}", node.cases)
def _append_node(self, node: _AppendableNodeT) -> _AppendableNodeT | None:
if not (self.tail and self.graph):
return None
self.graph.connect(self.tail, node)
self.tail = node
return node
def _subgraph(
self,
node: _SubGraphNodes,
name: str,
extra_blocks: Sequence[nodes.ExceptHandler | nodes.MatchCase] = (),
) -> None:
"""Create the subgraphs representing any `if`, `for` or `match` statements."""
if self.graph is None:
# global loop
self.graph = PathGraph(node)
self._subgraph_parse(node, node, extra_blocks)
self.graphs[f"{self.classname}{name}"] = self.graph
self.reset()
else:
self._append_node(node)
self._subgraph_parse(node, node, extra_blocks)
def _subgraph_parse(
self,
node: _SubGraphNodes,
pathnode: _SubGraphNodes,
extra_blocks: Sequence[nodes.ExceptHandler | nodes.MatchCase],
) -> None:
"""Parse `match`/`case` blocks, or the body and `else` block of `if`/`for`
statements.
"""
loose_ends = []
if isinstance(node, nodes.Match):
for case in extra_blocks:
if isinstance(case, nodes.MatchCase):
self.tail = node
self.dispatch_list(case.body)
loose_ends.append(self.tail)
loose_ends.append(node)
else:
self.tail = node
self.dispatch_list(node.body)
loose_ends.append(self.tail)
for extra in extra_blocks:
self.tail = node
self.dispatch_list(extra.body)
loose_ends.append(self.tail)
if node.orelse:
self.tail = node
self.dispatch_list(node.orelse)
loose_ends.append(self.tail)
else:
loose_ends.append(node)
if node and self.graph:
bottom = f"{self._bottom_counter}"
self._bottom_counter += 1
for end in loose_ends:
self.graph.connect(end, bottom)
self.tail = bottom
| PathGraphingAstVisitor |
python | huggingface__transformers | src/transformers/models/align/configuration_align.py | {
"start": 11855,
"end": 15448
} | class ____(PreTrainedConfig):
r"""
[`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. It is used to
instantiate a ALIGN model according to the specified arguments, defining the text model and vision model configs.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ALIGN
[kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`AlignTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`AlignVisionConfig`].
projection_dim (`int`, *optional*, defaults to 640):
Dimensionality of text and vision projection layers.
temperature_init_value (`float`, *optional*, defaults to 1.0):
The initial value of the *temperature* parameter. Default is used as per the original ALIGN implementation.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import AlignConfig, AlignModel
>>> # Initializing a AlignConfig with kakaobrain/align-base style configuration
>>> configuration = AlignConfig()
>>> # Initializing a AlignModel (with random weights) from the kakaobrain/align-base style configuration
>>> model = AlignModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a AlignConfig from a AlignTextConfig and a AlignVisionConfig
>>> from transformers import AlignTextConfig, AlignVisionConfig
>>> # Initializing ALIGN Text and Vision configurations
>>> config_text = AlignTextConfig()
>>> config_vision = AlignVisionConfig()
>>> config = AlignConfig(text_config=config_text, vision_config=config_vision)
```"""
model_type = "align"
sub_configs = {"text_config": AlignTextConfig, "vision_config": AlignVisionConfig}
def __init__(
self,
text_config=None,
vision_config=None,
projection_dim=640,
temperature_init_value=1.0,
initializer_range=0.02,
**kwargs,
):
if text_config is None:
text_config = AlignTextConfig()
logger.info("`text_config` is `None`. Initializing the `AlignTextConfig` with default values.")
elif isinstance(text_config, dict):
text_config = AlignTextConfig(**text_config)
if vision_config is None:
vision_config = AlignVisionConfig()
logger.info("`vision_config` is `None`. initializing the `AlignVisionConfig` with default values.")
elif isinstance(vision_config, dict):
vision_config = AlignVisionConfig(**vision_config)
self.text_config = text_config
self.vision_config = vision_config
self.projection_dim = projection_dim
self.temperature_init_value = temperature_init_value
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["AlignTextConfig", "AlignVisionConfig", "AlignConfig"]
| AlignConfig |
python | pallets__jinja | tests/test_utils.py | {
"start": 419,
"end": 2964
} | class ____:
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert d.keys() == ["d", "a", "c"]
def test_values(self):
cache = LRUCache(3)
cache["b"] = 1
cache["a"] = 2
assert cache.values() == [2, 1]
def test_values_empty(self):
cache = LRUCache(2)
assert cache.values() == []
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
@pytest.mark.parametrize("copy_func", [LRUCache.copy, shallow_copy])
def test_copy(self, copy_func):
cache = LRUCache(2)
cache["a"] = 1
cache["b"] = 2
copy = copy_func(cache)
assert copy._queue == cache._queue
copy["c"] = 3
assert copy._queue != cache._queue
assert copy.keys() == ["c", "b"]
def test_clear(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d.clear()
assert d.__getstate__() == {"capacity": 3, "_mapping": {}, "_queue": deque([])}
def test_repr(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
# Sort the strings - mapping is unordered
assert sorted(repr(d)) == sorted("<LRUCache {'a': 1, 'b': 2, 'c': 3}>")
def test_items(self):
"""Test various items, keys, values and iterators of LRUCache."""
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
assert d.items() == [("c", 3), ("b", 2), ("a", 1)]
assert d.keys() == ["c", "b", "a"]
assert d.values() == [3, 2, 1]
assert list(reversed(d)) == ["a", "b", "c"]
# Change the cache a little
d["b"]
d["a"] = 4
assert d.items() == [("a", 4), ("b", 2), ("c", 3)]
assert d.keys() == ["a", "b", "c"]
assert d.values() == [4, 2, 3]
assert list(reversed(d)) == ["c", "b", "a"]
def test_setdefault(self):
d = LRUCache(3)
assert len(d) == 0
assert d.setdefault("a") is None
assert d.setdefault("a", 1) is None
assert len(d) == 1
assert d.setdefault("b", 2) == 2
assert len(d) == 2
| TestLRUCache |
python | pytorch__pytorch | torch/_inductor/codegen/memory_planning.py | {
"start": 3670,
"end": 5706
} | class ____(AllocationTreeNode):
"""
Represents memory allocated to a given node in the allocation pool.
"""
node: BufferLike
live_range: LiveRange
size_hint: int
symbolic_size: sympy.Expr
allocated: bool = False
pool: Optional[AllocationPool] = None
offset: Optional[sympy.Expr] = None
earliest_available: Optional[float] = None
def __post_init__(self) -> None:
has_unbacked_sym = False
for s in self.node.get_layout().size:
if free_unbacked_symbols(s):
has_unbacked_sym = True
break
if has_unbacked_sym:
self.earliest_available = self.get_live_ranges().begin
@property
def device(self):
return self.node.get_device()
def get_live_ranges(self):
return LiveRanges([self.live_range])
def get_size_hint(self):
return self.size_hint
def get_symbolic_size(self):
return self.symbolic_size
def mark_allocated(self):
assert not self.allocated
self.allocated = True
def finalize(self, pool, offset):
assert self.pool is None and self.offset is None
self.pool = pool
self.offset = offset
return self
def codegen_alloc_from_pool(self, wrapper):
assert self.pool
node = self.node
shape = tuple(node.get_size())
stride = tuple(node.get_stride())
return wrapper.codegen_alloc_from_pool(
self.pool.name, self.offset, node.get_dtype(), shape, stride
)
def __repr__(self):
return (
f"{self.__class__.__name__}("
f"node={self.node.get_name()}, "
f"live_range={self.live_range}, "
f"size_hint={self.size_hint}, "
f"symbolic_size={self.symbolic_size}, "
f"pool={self.pool.name if self.pool else None}, "
f"offset={self.offset})"
)
def get_earliest_available(self):
return self.earliest_available
@dataclasses.dataclass
| Allocation |
python | coleifer__peewee | tests/sqlite.py | {
"start": 31575,
"end": 33006
} | class ____(BaseTestCase):
def test_virtual_model(self):
class Test(VirtualModel):
class Meta:
database = database
extension_module = 'ext1337'
legacy_table_names = False
options = {'huey': 'cat', 'mickey': 'dog'}
primary_key = False
class SubTest(Test): pass
self.assertSQL(Test._schema._create_table(), (
'CREATE VIRTUAL TABLE IF NOT EXISTS "test" '
'USING ext1337 '
'(huey=cat, mickey=dog)'), [])
self.assertSQL(SubTest._schema._create_table(), (
'CREATE VIRTUAL TABLE IF NOT EXISTS "sub_test" '
'USING ext1337 '
'(huey=cat, mickey=dog)'), [])
self.assertSQL(
Test._schema._create_table(huey='kitten', zaizee='cat'),
('CREATE VIRTUAL TABLE IF NOT EXISTS "test" '
'USING ext1337 (huey=kitten, mickey=dog, zaizee=cat)'), [])
def test_autoincrement_field(self):
class AutoIncrement(TestModel):
id = AutoIncrementField()
data = TextField()
class Meta:
database = database
self.assertSQL(AutoIncrement._schema._create_table(), (
'CREATE TABLE IF NOT EXISTS "auto_increment" '
'("id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, '
'"data" TEXT NOT NULL)'), [])
| TestSqliteExtensions |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/syntax.py | {
"start": 2550,
"end": 8618
} | class ____(QSyntaxHighlighter):
"""Syntax highlighter for the Python language.
"""
# Python keywords
keywords = [
'and', 'assert', 'break', 'class', 'continue', 'def',
'del', 'elif', 'else', 'except', 'exec', 'finally',
'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield',
'None', 'True', 'False', 'async', 'await',
]
# Python operators
operators = [
r'=',
# Comparison
r'==', r'!=', r'<', r'<=', r'>', r'>=',
# Arithmetic
r'\+', r'-', r'\*', r'/', r'//', r'\%', r'\*\*',
# In-place
r'\+=', r'-=', r'\*=', r'/=', r'\%=',
# Bitwise
r'\^', r'\|', r'\&', r'\~', r'>>', r'<<',
]
# Python braces
braces = [
r'\{', r'\}', r'\(', r'\)', r'\[', r'\]',
]
def __init__(self, document):
QSyntaxHighlighter.__init__(self, document)
# Multi-line strings (expression, flag, style)
# FIXME: The triple-quotes in these two lines will mess up the
# syntax highlighting from this point onward
self.tri_single = (QRegExp("'''"), 1, 'string2')
self.tri_double = (QRegExp('"""'), 2, 'string2')
rules = []
# Keyword, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, 'keyword')
for w in PythonHighlighter.keywords]
rules += [(r'%s' % o, 0, 'operator')
for o in PythonHighlighter.operators]
rules += [(r'%s' % b, 0, 'brace')
for b in PythonHighlighter.braces]
# All other rules
rules += [
# 'self'
(r'\bself\b', 0, 'self'),
# 'def' followed by an identifier
(r'\bdef\b\s*(\w+)', 1, 'defclass'),
# 'class' followed by an identifier
(r'\bclass\b\s*(\w+)', 1, 'defclass'),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, 'numbers'),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, 'numbers'),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, 'numbers'),
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, 'string'),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, 'string'),
# From '#' until a newline
(r'#[^\n]*', 0, 'comment'),
]
# Build a QRegExp for each pattern
self.rules = [(QRegExp(pat), index, fmt)
for (pat, index, fmt) in rules]
@property
def styles(self):
app = QtWidgets.QApplication.instance()
return DARK_STYLES if app.property('darkMode') else LIGHT_STYLES
def highlightBlock(self, text):
"""Apply syntax highlighting to the given block of text."""
rules = self.rules.copy()
string_spans = []
# First: apply string rules and record spans
for expression, nth, format in rules:
if format not in ('string', 'string2'):
continue
format = self.styles[format]
for n, match in enumerate(re.finditer(expression, text)):
if n < nth:
continue
start, end = match.span()
self.setFormat(start, end - start, format)
string_spans.append((start, end))
# Then: apply other rules only if not in a string
for expression, nth, format in rules:
if format in ('string', 'string2'):
continue
format = self.styles[format]
for n, match in enumerate(re.finditer(expression, text)):
if n < nth:
continue
start, end = match.span()
if any(start < e and end > s for s, e in string_spans):
continue # Skip overlapping with string
self.setFormat(start, end - start, format)
self.applySearchHighlight(text)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
"""
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
length = len(text) - start + add
# Apply formatting
self.setFormat(start, length, self.styles[style])
# Look for the next match
start = delimiter.indexIn(text, start + length)
# Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True
else:
return False
| PythonHighlighter |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard_python3/bundled-services/deferred/wsgi/main.py | {
"start": 1779,
"end": 2131
} | class ____(deferred.Handler):
"""Deferred task handler that adds additional logic."""
def post(self, environ):
print("Executing deferred task.")
return super().post(environ)
routes = {
"counter/increment": IncrementCounter,
"counter/get": ViewCounter,
"custom/path": CustomDeferredHandler(),
}
| CustomDeferredHandler |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 22706,
"end": 23845
} | class ____(Response):
"""
Response of queues.add_task endpoint.
:param added: Number of tasks added (0 or 1)
:type added: int
"""
_service = "queues"
_action = "add_task"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"added": {
"description": "Number of tasks added (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, added: Optional[int] = None, **kwargs: Any) -> None:
super(AddTaskResponse, self).__init__(**kwargs)
self.added = added
@schema_property("added")
def added(self) -> Optional[int]:
return self._property_added
@added.setter
def added(self, value: Optional[int]) -> None:
if value is None:
self._property_added = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "added", six.integer_types)
self._property_added = value
| AddTaskResponse |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 5454,
"end": 8981
} | class ____:
"""
Padding cache for KyutaiSpeechToTextConv1d causal convolutions in order to support streaming via cache padding.
See: https://huggingface.co/papers/2005.06720 & https://huggingface.co/papers/2204.07064
A padding cache is a list of cached partial hidden states for each convolution layer.
Hidden states are cached from the previous call to the KyutaiSpeechToTextConv1d forward pass, given the padding size.
"""
def __init__(
self,
num_layers: int,
per_layer_padding: list[int],
per_layer_padding_mode: list[str],
per_layer_in_channels: list[int],
):
# ensure correct number of layers for each arg
from_args_num_layers = {len(per_layer_padding), len(per_layer_padding_mode), len(per_layer_in_channels)}
if len(from_args_num_layers) != 1 or from_args_num_layers.pop() != num_layers:
raise ValueError(
f"Expected `num_layers` ({num_layers}) values in `per_layer_padding`, `per_layer_padding_mode` and `per_layer_in_channels`"
)
elif not all(mode in ["constant", "replicate"] for mode in per_layer_padding_mode):
raise NotImplementedError(
"`padding_cache` is not supported for convolutions using other than `constant` or `replicate` padding mode"
)
self.per_layer_padding = per_layer_padding
self.per_layer_padding_mode = per_layer_padding_mode
self.per_layer_in_channels = per_layer_in_channels
self.per_layer_is_init = [True] * num_layers
self.padding_cache = [None] * num_layers
def update(self, hidden_states: torch.Tensor, layer_idx: int):
"""
Updates the padding cache with the new padding states for the layer `layer_idx` and returns the current cache.
Parameters:
hidden_states (`torch.Tensor`):
The hidden states to be partially cached.
layer_idx (`int`):
The index of the layer to cache the states for.
Returns:
`torch.Tensor` or `None`, the current padding cache.
"""
batch_size, dtype, device = hidden_states.shape[0], hidden_states.dtype, hidden_states.device
padding = self.per_layer_padding[layer_idx]
padding_mode = self.per_layer_padding_mode[layer_idx]
in_channels = self.per_layer_in_channels[layer_idx]
if self.padding_cache[layer_idx] is None:
if padding_mode == "constant":
current_cache = torch.zeros(
batch_size,
in_channels,
padding,
device=device,
dtype=dtype,
)
elif padding_mode == "replicate":
current_cache = (
torch.ones(
batch_size,
in_channels,
padding,
device=device,
dtype=dtype,
)
* hidden_states[..., :1]
)
else:
current_cache = self.padding_cache[layer_idx]
# update the cache
if padding > 0:
padding_states = hidden_states[:, :, -padding:]
else:
padding_states = torch.empty(batch_size, in_channels, padding, dtype=dtype, device=device)
self.padding_cache[layer_idx] = padding_states
return current_cache
| KyutaiSpeechToTextConv1dPaddingCache |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 209894,
"end": 210701
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeclineTopicSuggestion"""
__schema__ = github_schema
__field_names__ = ("repository_id", "name", "reason", "client_mutation_id")
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The Node ID of the repository."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the suggested topic."""
reason = sgqlc.types.Field(sgqlc.types.non_null(TopicSuggestionDeclineReason), graphql_name="reason")
"""The reason why the suggested topic is declined."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeclineTopicSuggestionInput |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/hybrid/hybrid_four.py | {
"start": 380,
"end": 603
} | class ____(Comparator[str]):
def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501
return func.lower(self.__clause_element__()) == func.lower(other)
| CaseInsensitiveComparator |
python | numpy__numpy | numpy/f2py/tests/test_modules.py | {
"start": 1480,
"end": 1853
} | class ____(util.F2PyTest):
module_name = "example"
sources = [
util.getpath("tests", "src", "modules", "gh25337", "data.f90"),
util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"),
]
def test_gh25337(self):
self.module.data.set_shift(3)
assert "data" in dir(self.module)
@pytest.mark.slow
| TestModuleAndSubroutine |
python | pydantic__pydantic | pydantic/types.py | {
"start": 16665,
"end": 17708
} | class ____(BaseModel):
finite: FiniteFloat
m = Model(finite=1.0)
print(m)
#> finite=1.0
```
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTES TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def conbytes(
*,
min_length: int | None = None,
max_length: int | None = None,
strict: bool | None = None,
) -> type[bytes]:
"""A wrapper around `bytes` that allows for additional constraints.
Args:
min_length: The minimum length of the bytes.
max_length: The maximum length of the bytes.
strict: Whether to validate the bytes in strict mode.
Returns:
The wrapped bytes type.
"""
return Annotated[ # pyright: ignore[reportReturnType]
bytes,
Strict(strict) if strict is not None else None,
annotated_types.Len(min_length or 0, max_length),
]
StrictBytes = Annotated[bytes, Strict()]
"""A bytes that must be validated in strict mode."""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ STRING TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@_dataclasses.dataclass(frozen=True)
| Model |
python | openai__openai-python | src/openai/types/beta/realtime/realtime_response_status.py | {
"start": 385,
"end": 1326
} | class ____(BaseModel):
error: Optional[Error] = None
"""
A description of the error that caused the response to fail, populated when the
`status` is `failed`.
"""
reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None
"""The reason the Response did not complete.
For a `cancelled` Response, one of `turn_detected` (the server VAD detected a
new start of speech) or `client_cancelled` (the client sent a cancel event). For
an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the
server-side safety filter activated and cut off the response).
"""
type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None
"""
The type of error that caused the response to fail, corresponding with the
`status` field (`completed`, `cancelled`, `incomplete`, `failed`).
"""
| RealtimeResponseStatus |
python | PyCQA__pylint | tests/functional/n/not_context_manager.py | {
"start": 1941,
"end": 2562
} | class ____(ManagerMixin):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Test a false positive with returning a generator
# from a context manager.
def generator():
yield 42
@contextmanager
def context_manager_returning_generator():
return generator()
with context_manager_returning_generator():
pass
FIRST = [context_manager_returning_generator()]
with FIRST[0]:
pass
def other_indirect_func():
return generator()
def not_context_manager():
return other_indirect_func()
with not_context_manager(): # [not-context-manager]
pass
| FullContextManager |
python | huggingface__transformers | src/transformers/models/smolvlm/modeling_smolvlm.py | {
"start": 2569,
"end": 6721
} | class ____(nn.Module):
"""
This is a modified version of `siglip.modelign_siglip.SiglipVisionEmbeddings` to enable images of variable
resolution.
The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://huggingface.co/papers/2307.06304)
which allows treating images in their native aspect ratio and without the need to resize them to the same
fixed size. In particular, we start from the original pre-trained SigLIP model
(which uses images of fixed-size square images) and adapt it by training on images of variable resolutions.
"""
def __init__(self, config: SmolVLMVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
padding="valid",
)
self.num_patches_per_side = self.image_size // self.patch_size
self.num_patches = self.num_patches_per_side**2
self.num_positions = self.num_patches
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
batch_size, _, max_im_h, max_im_w = pixel_values.shape
patch_embeds = self.patch_embedding(pixel_values)
embeddings = patch_embeds.flatten(2).transpose(1, 2)
max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
boundaries = torch.arange(
1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side, device=pixel_values.device
)
position_ids = torch.full(
size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0, device=pixel_values.device
)
for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
nb_patches_h = p_attn_mask[:, 0].sum()
nb_patches_w = p_attn_mask[0].sum()
step_h = 1.0 / nb_patches_h
step_w = 1.0 / nb_patches_w
h_indices = torch.arange(nb_patches_h, device=position_ids.device, dtype=torch.float32)
w_indices = torch.arange(nb_patches_w, device=position_ids.device, dtype=torch.float32)
fractional_coords_h = h_indices * step_h
fractional_coords_w = w_indices * step_w
fractional_coords_h = torch.clamp(fractional_coords_h, max=(1.0 - 1e-6))
fractional_coords_w = torch.clamp(fractional_coords_w, max=(1.0 - 1e-6))
fractional_coords_h = fractional_coords_h.to(pixel_values.dtype)
fractional_coords_w = fractional_coords_w.to(pixel_values.dtype)
bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
position_ids[batch_idx][p_attn_mask.view(-1)] = pos_ids
embeddings = embeddings + self.position_embedding(position_ids)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| SmolVLMVisionEmbeddings |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_collections.py | {
"start": 1468,
"end": 3246
} | class ____(__TestCase):
def _superset_test(self, a, b):
self.assertGreaterEqual(
set(dir(a)),
set(dir(b)),
'{a} should have all the methods of {b}'.format(
a=a.__name__,
b=b.__name__,
),
)
def _copy_test(self, obj):
# Test internal copy
obj_copy = obj.copy()
self.assertIsNot(obj.data, obj_copy.data)
self.assertEqual(obj.data, obj_copy.data)
# Test copy.copy
obj.test = [1234] # Make sure instance vars are also copied.
obj_copy = copy.copy(obj)
self.assertIsNot(obj.data, obj_copy.data)
self.assertEqual(obj.data, obj_copy.data)
self.assertIs(obj.test, obj_copy.test)
def test_str_protocol(self):
self._superset_test(UserString, str)
def test_list_protocol(self):
self._superset_test(UserList, list)
def test_dict_protocol(self):
self._superset_test(UserDict, dict)
def test_list_copy(self):
obj = UserList()
obj.append(123)
self._copy_test(obj)
def test_dict_copy(self):
obj = UserDict()
obj[123] = "abc"
self._copy_test(obj)
def test_dict_missing(self):
with torch._dynamo.error_on_graph_break(False):
class A(UserDict):
def __missing__(self, key):
return 456
self.assertEqual(A()[123], 456)
# get() ignores __missing__ on dict
self.assertIs(A().get(123), None)
################################################################################
### ChainMap (helper class for configparser and the string module)
################################################################################
| TestUserObjects |
python | django__django | tests/migrations/test_migrations_squashed_double/0004_auto.py | {
"start": 43,
"end": 304
} | class ____(migrations.Migration):
dependencies = [("migrations", "0002_auto")]
operations = [
migrations.AlterField(
model_name="a",
name="foo",
field=models.BooleanField(default=False),
),
]
| Migration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.