language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/tests/test_autoscaler_drain_node_api.py | {
"start": 799,
"end": 3686
} | class ____(AutoscalingCluster):
"""AutoscalingCluster modified to used the above MockFakeProvider."""
def _generate_config(
self, head_resources, worker_node_types, autoscaler_v2: bool = False
):
config = super()._generate_config(
head_resources, worker_node_types, autoscaler_v2=autoscaler_v2
)
config["provider"]["type"] = "external"
config["provider"][
"module"
] = "ray.tests.test_autoscaler_drain_node_api.MockFakeProvider"
return config
@pytest.mark.skipif(platform.system() == "Windows", reason="Failing on Windows.")
@pytest.mark.parametrize("autoscaler_v2", [False, True], ids=["v1", "v2"])
def test_drain_api(autoscaler_v2, shutdown_only):
"""E2E test of the autoscaler's use of the DrainNode API.
Adapted from test_autoscaler_fake_multinode.py.
The strategy is to mock out Ray node process termination in
FakeMultiNodeProvider, leaving node termination to the DrainNode API.
Scale-down is verified by `ray.cluster_resources`. It is verified that
no removed_node errors are issued adter scale-down.
Validity of this test depends on the current implementation of DrainNode.
DrainNode currently works by asking the GCS to de-register and shut down
Ray nodes.
"""
# Autoscaling cluster with Ray process termination mocked out in the node
# provider.
cluster = MockAutoscalingCluster(
head_resources={"CPU": 1},
worker_node_types={
"gpu_node": {
"resources": {
"CPU": 1,
"GPU": 1,
"object_store_memory": 1024 * 1024 * 1024,
},
"node_config": {},
"min_workers": 0,
"max_workers": 2,
},
},
autoscaler_v2=autoscaler_v2,
)
try:
cluster.start()
ray.init("auto")
# Triggers the addition of a GPU node.
@ray.remote(num_gpus=1)
def f():
print("gpu ok")
ray.get(f.remote())
# Verify scale-up
wait_for_condition(lambda: ray.cluster_resources().get("GPU", 0) == 1)
# Sleep for double the idle timeout of 6 seconds.
time.sleep(12)
# Verify scale-down
wait_for_condition(lambda: ray.cluster_resources().get("GPU", 0) == 0)
# Check that no errors were raised while draining nodes.
# (Logic copied from test_failure4::test_gcs_drain.)
try:
p = init_error_pubsub()
errors = get_error_message(
p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5
)
assert len(errors) == 0
finally:
p.close()
finally:
cluster.shutdown()
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| MockAutoscalingCluster |
python | getsentry__sentry | src/sentry/testutils/metrics_backend.py | {
"start": 124,
"end": 645
} | class ____:
use_case_id = UseCaseID.TRANSACTIONS
org_id = 2
project_id = 1
metric_name = "measurements.speed"
set_values = [5, 3]
counter_value = 5
dist_values = [5, 3]
metrics_tags = {"a": "b"}
retention_days = 90
unit = "millisecond"
def get_mri(
self, metric_name: str, metric_type: str, use_case_id: UseCaseID, unit: str | None
) -> str:
mri_string = build_mri(metric_name, metric_type, use_case_id, unit)
return mri_string
| GenericMetricsTestMixIn |
python | pandas-dev__pandas | asv_bench/benchmarks/inference.py | {
"start": 6897,
"end": 7827
} | class ____:
params = [True, False]
param_names = ["cache"]
def setup(self, cache):
N = 10000
self.unique_numeric_seconds = list(range(N))
self.dup_numeric_seconds = [1000] * N
self.dup_string_dates = ["2000-02-11"] * N
self.dup_string_with_tz = ["2000-02-11 15:00:00-0800"] * N
def time_unique_seconds_and_unit(self, cache):
to_datetime(self.unique_numeric_seconds, unit="s", cache=cache)
def time_dup_seconds_and_unit(self, cache):
to_datetime(self.dup_numeric_seconds, unit="s", cache=cache)
def time_dup_string_dates(self, cache):
to_datetime(self.dup_string_dates, cache=cache)
def time_dup_string_dates_and_format(self, cache):
to_datetime(self.dup_string_dates, format="%Y-%m-%d", cache=cache)
def time_dup_string_tzoffset_dates(self, cache):
to_datetime(self.dup_string_with_tz, cache=cache)
| ToDatetimeCache |
python | huggingface__transformers | tests/models/musicgen_melody/test_modeling_musicgen_melody.py | {
"start": 18970,
"end": 22782
} | class ____:
def __init__(
self,
parent,
batch_size=3, # need batch_size != num_hidden_layers because of #29297
seq_length=7,
is_training=True,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
pad_token_id=99,
bos_token_id=99,
num_codebooks=4,
num_filters=4,
codebook_size=128,
conditional_seq_length=3,
chroma_length=24,
audio_channels=1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.num_codebooks = num_codebooks
self.num_filters = num_filters
self.codebook_size = codebook_size
self.conditional_seq_length = conditional_seq_length
self.chroma_length = chroma_length
self.encoder_seq_length = conditional_seq_length + seq_length
self.audio_channels = audio_channels
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.conditional_seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_musicgen_melody_inputs_dict(config, input_ids, decoder_input_ids=decoder_input_ids)
return config, inputs_dict
def get_config(self):
text_encoder_config = T5Config(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.intermediate_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
)
audio_encoder_config = EncodecConfig(
hidden_size=self.vocab_size,
compress=1,
num_filters=self.num_filters,
codebook_size=self.codebook_size,
codebook_dim=self.vocab_size,
)
decoder_config = MusicgenMelodyDecoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.bos_token_id,
bos_token_id=self.bos_token_id,
num_codebooks=self.num_codebooks,
tie_word_embeddings=False,
audio_channels=self.audio_channels,
)
config = MusicgenMelodyConfig(
text_encoder_config, audio_encoder_config, decoder_config, chroma_length=self.chroma_length
)
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
# Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenTest with Musicgen->MusicgenMelody, musicgen->musicgen_melody, EncoderDecoder->DecoderOnly, input_values->input_features
| MusicgenMelodyTester |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0063_extend_domain_from_timestamp_model.py | {
"start": 156,
"end": 823
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0062_add_ssl_status"),
]
operations = [
migrations.AddField(
model_name="domain",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, null=True, verbose_name="created"
),
),
migrations.AddField(
model_name="domain",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
]
| Migration |
python | MongoEngine__mongoengine | tests/fields/test_object_id_field.py | {
"start": 164,
"end": 1063
} | class ____(MongoDBTestCase):
def test_storage(self):
class MyDoc(Document):
oid = ObjectIdField()
doc = MyDoc(oid=ObjectId())
doc.save()
assert get_as_pymongo(doc) == {"_id": doc.id, "oid": doc.oid}
def test_constructor_converts_str_to_ObjectId(self):
class MyDoc(Document):
oid = ObjectIdField()
doc = MyDoc(oid=str(ObjectId()))
assert isinstance(doc.oid, ObjectId)
def test_validation_works(self):
class MyDoc(Document):
oid = ObjectIdField()
doc = MyDoc(oid="not-an-oid!")
with pytest.raises(ValidationError, match="Invalid ObjectID"):
doc.save()
def test_query_none_value_dont_raise(self):
# cf issue #2681
class MyDoc(Document):
oid = ObjectIdField(null=True)
_ = list(MyDoc.objects(oid=None))
| TestObjectIdField |
python | doocs__leetcode | solution/0200-0299/0293.Flip Game/Solution.py | {
"start": 0,
"end": 350
} | class ____:
def generatePossibleNextMoves(self, currentState: str) -> List[str]:
s = list(currentState)
ans = []
for i, (a, b) in enumerate(pairwise(s)):
if a == b == "+":
s[i] = s[i + 1] = "-"
ans.append("".join(s))
s[i] = s[i + 1] = "+"
return ans
| Solution |
python | PrefectHQ__prefect | tests/server/models/deprecated/test_work_queues.py | {
"start": 1364,
"end": 2176
} | class ____:
async def test_update_work_queue(self, session, work_queue):
result = await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(
filter=schemas.core.QueueFilter(tags=["updated", "tags"])
),
)
assert result
queue = await session.get(
orm_models.WorkQueue, work_queue.id, populate_existing=True
)
updated_queue = schemas.core.WorkQueue.model_validate(
queue,
from_attributes=True,
)
assert updated_queue.id == work_queue.id
with pytest.warns(DeprecationWarning):
assert updated_queue.filter.tags == ["updated", "tags"]
| TestUpdateWorkQueue |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 15489,
"end": 16885
} | class ____(HelperFunction):
def _calculate(self, X, y, logger, feat_type):
categorical = {
key: True if value.lower() == "categorical" else False
for key, value in feat_type.items()
}
symbols_per_column = []
for i in range(X.shape[1]):
if categorical[X.columns[i] if hasattr(X, "columns") else i]:
column = X.iloc[:, i] if hasattr(X, "iloc") else X[:, i]
unique_values = (
column.unique() if hasattr(column, "unique") else np.unique(column)
)
num_unique = np.sum(pd.notna(unique_values))
symbols_per_column.append(num_unique)
return symbols_per_column
def _calculate_sparse(self, X, y, logger, feat_type):
categorical = {
key: True if value.lower() == "categorical" else False
for key, value in feat_type.items()
}
symbols_per_column = []
new_X = X.tocsc()
for i in range(new_X.shape[1]):
if categorical[X.columns[i] if hasattr(X, "columns") else i]:
unique_values = np.unique(new_X.getcol(i).data)
num_unique = np.sum(np.isfinite(unique_values))
symbols_per_column.append(num_unique)
return symbols_per_column
@metafeatures.define("SymbolsMin", dependency="NumSymbols")
| NumSymbols |
python | aio-libs__aiohttp | tests/test_multipart.py | {
"start": 23900,
"end": 44651
} | class ____:
def test_from_response(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: 'multipart/related;boundary=":"'})
)
with Stream(b"--:\r\n\r\nhello\r\n--:--") as stream:
resp = Response(h, stream)
res = aiohttp.MultipartReader.from_response(resp) # type: ignore[arg-type]
assert isinstance(res, MultipartResponseWrapper)
assert isinstance(res.stream, aiohttp.MultipartReader)
def test_bad_boundary(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "multipart/related;boundary=" + "a" * 80})
)
with Stream(b"") as stream:
resp = Response(h, stream)
with pytest.raises(ValueError):
aiohttp.MultipartReader.from_response(resp) # type: ignore[arg-type]
def test_dispatch(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "text/plain"}))
with Stream(b"--:\r\n\r\necho\r\n--:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
res = reader._get_part_reader(h)
assert isinstance(res, reader.part_reader_cls)
def test_dispatch_bodypart(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "text/plain"}))
with Stream(b"--:\r\n\r\necho\r\n--:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
res = reader._get_part_reader(h)
assert isinstance(res, reader.part_reader_cls)
def test_dispatch_multipart(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "multipart/related;boundary=--:--"})
)
with Stream(
b"----:--\r\n"
b"\r\n"
b"test\r\n"
b"----:--\r\n"
b"\r\n"
b"passed\r\n"
b"----:----\r\n"
b"--:--"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
res = reader._get_part_reader(h)
assert isinstance(res, reader.__class__)
def test_dispatch_custom_multipart_reader(self) -> None:
class CustomReader(aiohttp.MultipartReader):
pass
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "multipart/related;boundary=--:--"})
)
with Stream(
b"----:--\r\n"
b"\r\n"
b"test\r\n"
b"----:--\r\n"
b"\r\n"
b"passed\r\n"
b"----:----\r\n"
b"--:--"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
reader.multipart_reader_cls = CustomReader
res = reader._get_part_reader(h)
assert isinstance(res, CustomReader)
async def test_emit_next(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: 'multipart/related;boundary=":"'})
)
with Stream(b"--:\r\n\r\necho\r\n--:--") as stream:
reader = aiohttp.MultipartReader(h, stream)
res = await reader.next()
assert isinstance(res, reader.part_reader_cls)
async def test_invalid_boundary(self) -> None:
with Stream(b"---:\r\n\r\necho\r\n---:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
with pytest.raises(ValueError):
await reader.next()
async def test_read_boundary_across_chunks(self) -> None:
class SplitBoundaryStream(StreamReader):
def __init__(self) -> None:
self.content = [
b"--foobar\r\n\r\n",
b"Hello,\r\n-",
b"-fo",
b"ob",
b"ar\r\n",
b"\r\nwor",
b"ld!",
b"\r\n--f",
b"oobar--",
]
async def read(self, size: int | None = None) -> bytes:
chunk = self.content.pop(0)
assert size is not None and len(chunk) <= size
return chunk
def at_eof(self) -> bool:
return not self.content
async def readline(self) -> bytes:
line = b""
while self.content and b"\n" not in line:
line += self.content.pop(0)
line, *extra = line.split(b"\n", maxsplit=1)
if extra and extra[0]:
self.content.insert(0, extra[0])
return line + b"\n"
def unread_data(self, data: bytes) -> None:
if self.content:
self.content[0] = data + self.content[0]
else:
self.content.append(data)
stream = SplitBoundaryStream()
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary="foobar"'}, stream
)
part = await anext(reader)
assert isinstance(part, BodyPartReader)
result = await part.read_chunk(10)
assert result == b"Hello,"
result = await part.read_chunk(10)
assert result == b""
assert part.at_eof()
part = await anext(reader)
assert isinstance(part, BodyPartReader)
result = await part.read_chunk(10)
assert result == b"world!"
result = await part.read_chunk(10)
assert result == b""
assert part.at_eof()
with pytest.raises(StopAsyncIteration):
await anext(reader)
async def test_release(self) -> None:
with Stream(
b"--:\r\n"
b"Content-Type: multipart/related;boundary=--:--\r\n"
b"\r\n"
b"----:--\r\n"
b"\r\n"
b"test\r\n"
b"----:--\r\n"
b"\r\n"
b"passed\r\n"
b"----:----\r\n"
b"\r\n"
b"--:--"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
stream,
)
await reader.release()
assert reader.at_eof()
async def test_release_release(self) -> None:
with Stream(b"--:\r\n\r\necho\r\n--:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
await reader.release()
assert reader.at_eof()
await reader.release()
assert reader.at_eof()
async def test_release_next(self) -> None:
with Stream(b"--:\r\n\r\necho\r\n--:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
await reader.release()
assert reader.at_eof()
res = await reader.next()
assert res is None
async def test_second_next_releases_previous_object(self) -> None:
with Stream(b"--:\r\n\r\ntest\r\n--:\r\n\r\npassed\r\n--:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
first = await reader.next()
assert isinstance(first, aiohttp.BodyPartReader)
second = await reader.next()
assert second is not None
assert first.at_eof()
assert not second.at_eof()
async def test_release_without_read_the_last_object(self) -> None:
with Stream(b"--:\r\n\r\ntest\r\n--:\r\n\r\npassed\r\n--:--") as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
first = await reader.next()
second = await reader.next()
third = await reader.next()
assert first is not None
assert second is not None
assert first.at_eof()
assert second.at_eof()
assert second.at_eof()
assert third is None
async def test_read_chunk_by_length_doesnt_break_reader(self) -> None:
with Stream(
b"--:\r\n"
b"Content-Length: 4\r\n\r\n"
b"test"
b"\r\n--:\r\n"
b"Content-Length: 6\r\n\r\n"
b"passed"
b"\r\n--:--"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
body_parts = []
while True:
read_part = b""
part = await reader.next()
if part is None:
break
assert isinstance(part, BodyPartReader)
while not part.at_eof():
read_part += await part.read_chunk(3)
body_parts.append(read_part)
assert body_parts == [b"test", b"passed"]
async def test_read_chunk_from_stream_doesnt_break_reader(self) -> None:
with Stream(
b"--:\r\n"
b"\r\n"
b"chunk"
b"\r\n--:\r\n"
b"\r\n"
b"two_chunks"
b"\r\n--:--"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
body_parts = []
while True:
read_part = b""
part = await reader.next()
if part is None:
break
assert isinstance(part, BodyPartReader)
while not part.at_eof():
chunk = await part.read_chunk(5)
assert chunk
read_part += chunk
body_parts.append(read_part)
assert body_parts == [b"chunk", b"two_chunks"]
async def test_reading_skips_prelude(self) -> None:
with Stream(
b"Multi-part data is not supported.\r\n"
b"\r\n"
b"--:\r\n"
b"\r\n"
b"test\r\n"
b"--:\r\n"
b"\r\n"
b"passed\r\n"
b"--:--"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
stream,
)
first = await reader.next()
assert isinstance(first, aiohttp.BodyPartReader)
second = await reader.next()
assert isinstance(second, BodyPartReader)
assert first.at_eof()
assert not second.at_eof()
async def test_read_form_default_encoding(self) -> None:
with Stream(
b"--:\r\n"
b'Content-Disposition: form-data; name="_charset_"\r\n\r\n'
b"ascii"
b"\r\n"
b"--:\r\n"
b'Content-Disposition: form-data; name="field1"\r\n\r\n'
b"foo"
b"\r\n"
b"--:\r\n"
b"Content-Type: text/plain;charset=UTF-8\r\n"
b'Content-Disposition: form-data; name="field2"\r\n\r\n'
b"foo"
b"\r\n"
b"--:\r\n"
b'Content-Disposition: form-data; name="field3"\r\n\r\n'
b"foo"
b"\r\n"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/form-data;boundary=":"'},
stream,
)
field1 = await reader.next()
assert isinstance(field1, BodyPartReader)
assert field1.name == "field1"
assert field1.get_charset("default") == "ascii"
field2 = await reader.next()
assert isinstance(field2, BodyPartReader)
assert field2.name == "field2"
assert field2.get_charset("default") == "UTF-8"
field3 = await reader.next()
assert isinstance(field3, BodyPartReader)
assert field3.name == "field3"
assert field3.get_charset("default") == "ascii"
async def test_read_form_invalid_default_encoding(self) -> None:
with Stream(
b"--:\r\n"
b'Content-Disposition: form-data; name="_charset_"\r\n\r\n'
b"this-value-is-too-long-to-be-a-charset"
b"\r\n"
b"--:\r\n"
b'Content-Disposition: form-data; name="field1"\r\n\r\n'
b"foo"
b"\r\n"
) as stream:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/form-data;boundary=":"'},
stream,
)
with pytest.raises(RuntimeError, match="Invalid default charset"):
await reader.next()
async def test_writer(writer: aiohttp.MultipartWriter) -> None:
assert writer.size == 7
assert writer.boundary == ":"
async def test_writer_serialize_io_chunk(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
with io.BytesIO(b"foobarbaz") as file_handle:
writer.append(file_handle)
await writer.write(stream)
assert (
buf == b"--:\r\nContent-Type: application/octet-stream"
b"\r\nContent-Length: 9\r\n\r\nfoobarbaz\r\n--:--\r\n"
)
async def test_writer_serialize_json(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
writer.append_json({"привет": "мир"})
await writer.write(stream)
assert (
b'{"\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442":'
b' "\\u043c\\u0438\\u0440"}' in buf
)
async def test_writer_serialize_form(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
data = [("foo", "bar"), ("foo", "baz"), ("boo", "zoo")]
writer.append_form(data)
await writer.write(stream)
assert b"foo=bar&foo=baz&boo=zoo" in buf
async def test_writer_serialize_form_dict(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
data = {"hello": "мир"}
writer.append_form(data)
await writer.write(stream)
assert b"hello=%D0%BC%D0%B8%D1%80" in buf
async def test_writer_write(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
writer.append("foo-bar-baz")
writer.append_json({"test": "passed"})
writer.append_form({"test": "passed"})
writer.append_form([("one", "1"), ("two", "2")])
sub_multipart = aiohttp.MultipartWriter(boundary="::")
sub_multipart.append("nested content")
sub_multipart.headers["X-CUSTOM"] = "test"
writer.append(sub_multipart)
await writer.write(stream)
assert (
b"--:\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 11\r\n\r\n"
b"foo-bar-baz"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/json\r\n"
b"Content-Length: 18\r\n\r\n"
b'{"test": "passed"}'
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"test=passed"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"one=1&two=2"
b"\r\n"
b"--:\r\n"
b'Content-Type: multipart/mixed; boundary="::"\r\n'
b"X-CUSTOM: test\r\nContent-Length: 93\r\n\r\n"
b"--::\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 14\r\n\r\n"
b"nested content\r\n"
b"--::--\r\n"
b"\r\n"
b"--:--\r\n"
) == bytes(buf)
async def test_writer_write_no_close_boundary(buf: bytearray, stream: Stream) -> None:
writer = aiohttp.MultipartWriter(boundary=":")
writer.append("foo-bar-baz")
writer.append_json({"test": "passed"})
writer.append_form({"test": "passed"})
writer.append_form([("one", "1"), ("two", "2")])
await writer.write(stream, close_boundary=False)
assert (
b"--:\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 11\r\n\r\n"
b"foo-bar-baz"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/json\r\n"
b"Content-Length: 18\r\n\r\n"
b'{"test": "passed"}'
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"test=passed"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"one=1&two=2"
b"\r\n"
) == bytes(buf)
async def test_writer_write_no_parts(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
await writer.write(stream)
assert b"--:--\r\n" == bytes(buf)
@pytest.mark.usefixtures("parametrize_zlib_backend")
async def test_writer_serialize_with_content_encoding_gzip(
buf: bytearray,
stream: Stream,
writer: aiohttp.MultipartWriter,
) -> None:
writer.append("Time to Relax!", {CONTENT_ENCODING: "gzip"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Encoding: gzip" == headers
)
decompressor = ZLibBackend.decompressobj(wbits=16 + ZLibBackend.MAX_WBITS)
data = decompressor.decompress(message.split(b"\r\n")[0])
data += decompressor.flush()
assert b"Time to Relax!" == data
async def test_writer_serialize_with_content_encoding_deflate(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
writer.append("Time to Relax!", {CONTENT_ENCODING: "deflate"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Encoding: deflate" == headers
)
thing = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--\r\n"
assert thing == message
async def test_writer_serialize_with_content_encoding_identity(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
thing = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00"
writer.append(thing, {CONTENT_ENCODING: "identity"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: application/octet-stream\r\n"
b"Content-Encoding: identity\r\n"
b"Content-Length: 16" == headers
)
assert thing == message.split(b"\r\n")[0]
def test_writer_serialize_with_content_encoding_unknown(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
with pytest.raises(RuntimeError):
writer.append("Time to Relax!", {CONTENT_ENCODING: "snappy"})
async def test_writer_with_content_transfer_encoding_base64(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
writer.append("Time to Relax!", {CONTENT_TRANSFER_ENCODING: "base64"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Transfer-Encoding: base64" == headers
)
assert b"VGltZSB0byBSZWxheCE=" == message.split(b"\r\n")[0]
async def test_writer_content_transfer_encoding_quote_printable(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
writer.append("Привет, мир!", {CONTENT_TRANSFER_ENCODING: "quoted-printable"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Transfer-Encoding: quoted-printable" == headers
)
assert (
b"=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,"
b" =D0=BC=D0=B8=D1=80!" == message.split(b"\r\n")[0]
)
def test_writer_content_transfer_encoding_unknown(
buf: bytearray, stream: Stream, writer: aiohttp.MultipartWriter
) -> None:
with pytest.raises(RuntimeError):
writer.append("Time to Relax!", {CONTENT_TRANSFER_ENCODING: "unknown"})
| TestMultipartReader |
python | crytic__slither | slither/tools/upgradeability/__main__.py | {
"start": 6784,
"end": 7214
} | class ____(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(
self,
parser: Any,
args: Any,
values: Optional[Union[str, Sequence[Any]]],
option_string: Any = None,
) -> None: # pylint: disable=signature-differs
checks = _get_checks()
assert isinstance(values, str)
output_to_markdown(checks, values)
parser.exit()
| OutputMarkdown |
python | geekcomputers__Python | Sorting Algorithims/quicksort_linkedlist.py | {
"start": 319,
"end": 1983
} | class ____:
def __init__(self):
self.head = None
# method to insert nodes at the start of linkedlist
def insert(self, new_data: int) -> None:
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
# method to print the linkedlist
def printLL(self) -> None:
temp = self.head
if temp == None:
return "Linked List is empty"
while temp.next:
print(temp.data, "->", end="")
temp = temp.next
print(temp.data)
return
# Partition algorithm with pivot as first element
def partition(start, end):
if start == None or start.next == None:
return start
prev, curr = start, start.next
pivot = prev.data
while curr != end:
if curr.data < pivot:
prev = prev.next
temp = prev.data
prev.data = curr.data
curr.data = temp
curr = curr.next
temp = prev.data
prev.data = start.data
start.data = temp
return prev
# recursive quicksort for function calls
def quicksort_LL(start, end):
if start != end:
pos = partition(start, end)
quicksort_LL(start, pos)
quicksort_LL(pos.next, end)
return
if __name__ == "__main__":
ll = LinkedList()
print(
"Enter the space seperated values of numbers to be inserted in linkedlist prompted below:"
)
arr = list(map(int, input().split()))
for num in arr:
ll.insert(num)
print("Linkedlist before sorting:")
ll.printLL()
quicksort_LL(ll.head, None)
print("Linkedlist after sorting: ")
ll.printLL()
| LinkedList |
python | cython__cython | tests/run/withstat_py.py | {
"start": 133,
"end": 3619
} | class ____(object):
def __init__(self, value, exit_ret = None):
self.value = value
self.exit_ret = exit_ret
def __exit__(self, a, b, tb):
print("exit %s %s %s" % (typename(a), typename(b), typename(tb)))
return self.exit_ret
def __enter__(self):
print("enter")
return self.value
def no_as():
"""
>>> no_as()
enter
hello
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager("value"):
print("hello")
def basic():
"""
>>> basic()
enter
value
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager("value") as x:
print(x)
def with_pass():
"""
>>> with_pass()
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager("value") as x:
pass
def with_return():
"""
>>> print(with_return())
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
value
"""
with ContextManager("value") as x:
return x
def with_break():
"""
>>> print(with_break())
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
a
"""
for c in list("abc"):
with ContextManager("value") as x:
break
print("FAILED")
return c
def with_continue():
"""
>>> print(with_continue())
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
c
"""
for c in list("abc"):
with ContextManager("value") as x:
continue
print("FAILED")
return c
def with_exception(exit_ret):
"""
>>> with_exception(None)
enter
value
exit <type 'type'> <type 'MyException'> <type 'traceback'>
outer except
>>> with_exception(True)
enter
value
exit <type 'type'> <type 'MyException'> <type 'traceback'>
"""
try:
with ContextManager("value", exit_ret=exit_ret) as value:
print(value)
raise MyException()
except:
print("outer except")
def with_real_lock():
"""
>>> with_real_lock()
about to acquire lock
holding lock
lock no longer held
"""
from threading import Lock
lock = Lock()
print("about to acquire lock")
with lock:
print("holding lock")
print("lock no longer held")
def functions_in_with():
"""
>>> f = functions_in_with()
enter
exit <type 'type'> <type 'MyException'> <type 'traceback'>
outer except
>>> f(1)[0]
1
>>> print(f(1)[1])
value
"""
try:
with ContextManager("value") as value:
def f(x): return x, value
make = lambda x:x()
raise make(MyException)
except:
print("outer except")
return f
def multitarget():
"""
>>> multitarget()
enter
1 2 3 4 5
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager((1, 2, (3, (4, 5)))) as (a, b, (c, (d, e))):
print('%s %s %s %s %s' % (a, b, c, d, e))
def tupletarget():
"""
>>> tupletarget()
enter
(1, 2, (3, (4, 5)))
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager((1, 2, (3, (4, 5)))) as t:
print(t)
| ContextManager |
python | davidhalter__jedi | test/completion/stdlib.py | {
"start": 5847,
"end": 6854
} | class ____:
def function(self, a, b):
return a, b
a = functools.partialmethod(function, 0)
kw = functools.partialmethod(function, b=1.0)
just_partial = functools.partial(function, 1, 2.0)
#? int()
X().a('')[0]
#? str()
X().a('')[1]
# The access of partialmethods on classes are not 100% correct. This doesn't
# really matter, because nobody uses it like that anyway and would take quite a
# bit of work to fix all of these cases.
#? str()
X.a('')[0]
#?
X.a('')[1]
#? X()
X.a(X(), '')[0]
#? str()
X.a(X(), '')[1]
tup = X().kw(1)
#? int()
tup[0]
#? float()
tup[1]
tup = X.kw(1)
#?
tup[0]
#? float()
tup[1]
tup = X.kw(X(), 1)
#? int()
tup[0]
#? float()
tup[1]
#? float()
X.just_partial('')[0]
#? str()
X.just_partial('')[1]
#? float()
X().just_partial('')[0]
#? str()
X().just_partial('')[1]
# python >= 3.8
@functools.lru_cache
def x() -> int: ...
@functools.lru_cache()
def y() -> float: ...
@functools.lru_cache(8)
def z() -> str: ...
#? int()
x()
#? float()
y()
#? str()
z()
| X |
python | getsentry__sentry | src/sentry/monitors/models.py | {
"start": 27786,
"end": 28760
} | class ____(Model):
"""
Records an instance where we have detected a monitor environment to be
broken based on a long duration of failure and consecutive failing check-ins
"""
__relocation_scope__ = RelocationScope.Excluded
monitor_incident = FlexibleForeignKey("monitors.MonitorIncident")
detection_timestamp = models.DateTimeField(auto_now_add=True)
user_notified_timestamp = models.DateTimeField(null=True, db_index=True)
env_muted_timestamp = models.DateTimeField(null=True, db_index=True)
class Meta:
app_label = "monitors"
db_table = "sentry_monitorenvbrokendetection"
def get_cron_monitor(detector: Detector) -> Monitor:
"""
Given a detector get the matching cron monitor.
"""
data_source = detector.data_sources.first()
assert data_source
return Monitor.objects.get(id=int(data_source.source_id))
@data_source_type_registry.register(DATA_SOURCE_CRON_MONITOR)
| MonitorEnvBrokenDetection |
python | django__django | django/http/response.py | {
"start": 23407,
"end": 23524
} | class ____(HttpResponseRedirectBase):
status_code = 302
status_code_preserve_request = 307
| HttpResponseRedirect |
python | plotly__plotly.py | plotly/graph_objs/scattercarpet/legendgrouptitle/_font.py | {
"start": 233,
"end": 9957
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet.legendgrouptitle"
_path_str = "scattercarpet.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattercarpet.
legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_test.py | {
"start": 6905,
"end": 8025
} | class ____(base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
def _get_all_checkpoint_names(root):
serialized_tensors, _, _, _ = save_util.serialize_graph_view(
graph_view.ObjectGraphView(root))
checkpoint_names = []
for tensor_dict in serialized_tensors.values():
checkpoint_names.extend(tensor_dict.keys())
return checkpoint_names
| _OwnsMirroredVariables |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/endpoints/backend/main.py | {
"start": 971,
"end": 1368
} | class ____(messages.Message):
"""Collection of Greetings."""
items = messages.MessageField(Greeting, 1, repeated=True)
STORED_GREETINGS = GreetingCollection(
items=[
Greeting(message="hello world!"),
Greeting(message="goodbye world!"),
]
)
# [END endpoints_messages]
# [START endpoints_greeting_api]
@endpoints.api(name="greeting", version="v1")
| GreetingCollection |
python | huggingface__transformers | src/transformers/models/gemma/modeling_gemma.py | {
"start": 9798,
"end": 12999
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: GemmaConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = not getattr(config, "use_bidirectional_attention", False)
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| GemmaAttention |
python | scrapy__scrapy | scrapy/exporters.py | {
"start": 12287,
"end": 13919
} | class ____(BaseItemExporter):
"""This is a base class for item exporters that extends
:class:`BaseItemExporter` with support for nested items.
It serializes items to built-in Python types, so that any serialization
library (e.g. :mod:`json` or msgpack_) can be used on top of it.
.. _msgpack: https://pypi.org/project/msgpack/
"""
def _configure(self, options: dict[str, Any], dont_fail: bool = False) -> None:
super()._configure(options, dont_fail)
if not self.encoding:
self.encoding = "utf-8"
def serialize_field(
self, field: Mapping[str, Any] | Field, name: str, value: Any
) -> Any:
serializer: Callable[[Any], Any] = field.get(
"serializer", self._serialize_value
)
return serializer(value)
def _serialize_value(self, value: Any) -> Any:
if isinstance(value, Item):
return self.export_item(value)
if isinstance(value, (str, bytes)):
return to_unicode(value, encoding=self.encoding)
if is_item(value):
return dict(self._serialize_item(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
return value
def _serialize_item(self, item: Any) -> Iterable[tuple[str | bytes, Any]]:
for key, value in ItemAdapter(item).items():
yield key, self._serialize_value(value)
def export_item(self, item: Any) -> dict[str | bytes, Any]: # type: ignore[override]
result: dict[str | bytes, Any] = dict(self._get_serialized_fields(item))
return result
| PythonItemExporter |
python | openai__openai-python | src/openai/types/responses/response_reasoning_item.py | {
"start": 455,
"end": 646
} | class ____(BaseModel):
text: str
"""The reasoning text from the model."""
type: Literal["reasoning_text"]
"""The type of the reasoning text. Always `reasoning_text`."""
| Content |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 17408,
"end": 18133
} | class ____(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViTConfig->DPTConfig, ViTAttention->DPTViTAttention, ViTIntermediate->DPTViTIntermediate, ViTOutput->DPTViTOutput, ViTLayer->DPTViTLayer
| DPTViTOutput |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/vector/stateful_observation.py | {
"start": 484,
"end": 4756
} | class ____(VectorObservationWrapper, gym.utils.RecordConstructorArgs):
"""This wrapper will normalize observations s.t. each coordinate is centered with unit variance.
The property `_update_running_mean` allows to freeze/continue the running mean calculation of the observation
statistics. If `True` (default), the `RunningMeanStd` will get updated every step and reset call.
If `False`, the calculated statistics are used but not updated anymore; this may be used during evaluation.
Note:
The normalization depends on past trajectories and observations will not be normalized correctly if the wrapper was
newly instantiated or the policy was changed recently.
Example without the normalize observation wrapper:
>>> import gymnasium as gym
>>> envs = gym.make_vec("CartPole-v1", num_envs=3, vectorization_mode="sync")
>>> obs, info = envs.reset(seed=123)
>>> _ = envs.action_space.seed(123)
>>> for _ in range(100):
... obs, *_ = envs.step(envs.action_space.sample())
>>> np.mean(obs)
np.float32(0.024251968)
>>> np.std(obs)
np.float32(0.62259156)
>>> envs.close()
Example with the normalize observation wrapper:
>>> import gymnasium as gym
>>> envs = gym.make_vec("CartPole-v1", num_envs=3, vectorization_mode="sync")
>>> envs = NormalizeObservation(envs)
>>> obs, info = envs.reset(seed=123)
>>> _ = envs.action_space.seed(123)
>>> for _ in range(100):
... obs, *_ = envs.step(envs.action_space.sample())
>>> np.mean(obs)
np.float32(-0.2359734)
>>> np.std(obs)
np.float32(1.1938739)
>>> envs.close()
"""
def __init__(self, env: VectorEnv, epsilon: float = 1e-8):
"""This wrapper will normalize observations s.t. each coordinate is centered with unit variance.
Args:
env (Env): The environment to apply the wrapper
epsilon: A stability parameter that is used when scaling the observations.
"""
gym.utils.RecordConstructorArgs.__init__(self, epsilon=epsilon)
VectorObservationWrapper.__init__(self, env)
if "autoreset_mode" not in self.env.metadata:
warn(
f"{self} is missing `autoreset_mode` data. Assuming that the vector environment it follows the `NextStep` autoreset api or autoreset is disabled. Read https://farama.org/Vector-Autoreset-Mode for more details."
)
else:
assert self.env.metadata["autoreset_mode"] in {AutoresetMode.NEXT_STEP}
self.obs_rms = RunningMeanStd(
shape=self.single_observation_space.shape,
dtype=self.single_observation_space.dtype,
)
self.epsilon = epsilon
self._update_running_mean = True
@property
def update_running_mean(self) -> bool:
"""Property to freeze/continue the running mean calculation of the observation statistics."""
return self._update_running_mean
@update_running_mean.setter
def update_running_mean(self, setting: bool):
"""Sets the property to freeze/continue the running mean calculation of the observation statistics."""
self._update_running_mean = setting
def reset(
self,
*,
seed: int | list[int] | None = None,
options: dict[str, Any] | None = None,
) -> tuple[ObsType, dict[str, Any]]:
"""Reset function for `NormalizeObservationWrapper` which is disabled for partial resets."""
assert (
options is None
or "reset_mask" not in options
or np.all(options["reset_mask"])
)
return super().reset(seed=seed, options=options)
def observations(self, observations: ObsType) -> ObsType:
"""Defines the vector observation normalization function.
Args:
observations: A vector observation from the environment
Returns:
the normalized observation
"""
if self._update_running_mean:
self.obs_rms.update(observations)
return (observations - self.obs_rms.mean) / np.sqrt(
self.obs_rms.var + self.epsilon
)
| NormalizeObservation |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py | {
"start": 5132,
"end": 5582
} | class ____(FileSystemEvent):
"""File system event representing file modification on the file system."""
event_type = EVENT_TYPE_MODIFIED
def __init__(self, src_path):
super(FileModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
| FileModifiedEvent |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 21470,
"end": 21590
} | class ____(DagsterError):
"""Indicates that a pipeline run already exists in a run storage."""
| DagsterRunAlreadyExists |
python | django__django | tests/migrations/test_migrations_namespace_package/0001_initial.py | {
"start": 43,
"end": 341
} | class ____(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
],
),
]
| Migration |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 1147,
"end": 1267
} | class ____[*Ts = Unpack[tuple[int]]]: ...
# This should generate an error because default must be unpacked tuple.
| ClassTs2 |
python | django__django | tests/custom_lookups/tests.py | {
"start": 1586,
"end": 1826
} | class ____(models.Transform):
bilateral = True
lookup_name = "mult3"
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return "3 * (%s)" % lhs, lhs_params
| Mult3BilateralTransform |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/partial1.py | {
"start": 2409,
"end": 2595
} | class ____(Protocol[_T2]):
def __new__(cls, __func: Callable[..., _T2]) -> Self: ...
def func9() -> int: ...
# This should generate an error.
x: Partial[str] = partial(func9)
| Partial |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/strategies.py | {
"start": 7920,
"end": 21949
} | class ____(Generic[Ex]):
"""A ``SearchStrategy`` tells Hypothesis how to generate that kind of input.
This class is only part of the public API for use in type annotations, so that
you can write e.g. ``-> SearchStrategy[Foo]`` for your function which returns
``builds(Foo, ...)``. Do not inherit from or directly instantiate this class.
"""
__module__: str = "hypothesis.strategies"
LABELS: ClassVar[dict[type, int]] = {}
# triggers `assert isinstance(label, int)` under threading when setting this
# in init instead of a classvar. I'm not sure why, init should be safe. But
# this works so I'm not looking into it further atm.
__label: int | UniqueIdentifier | None = None
def __init__(self):
self.validate_called: dict[int, bool] = {}
def is_currently_empty(self, data: ConjectureData) -> bool:
"""
Returns whether this strategy is currently empty. Unlike ``empty``,
which is computed based on static information and cannot change,
``is_currently_empty`` may change over time based on choices made
during the test case.
This is currently only used for stateful testing, where |Bundle| grows a
list of values to choose from over the course of a test case.
``data`` will only be used for introspection. No values will be drawn
from it in a way that modifies the choice sequence.
"""
return self.is_empty
@property
def is_empty(self) -> Any:
# Returns True if this strategy can never draw a value and will always
# result in the data being marked invalid.
# The fact that this returns False does not guarantee that a valid value
# can be drawn - this is not intended to be perfect, and is primarily
# intended to be an optimisation for some cases.
return recursive_property(self, "is_empty", True)
# Returns True if values from this strategy can safely be reused without
# this causing unexpected behaviour.
# True if values from this strategy can be implicitly reused (e.g. as
# background values in a numpy array) without causing surprising
# user-visible behaviour. Should be false for built-in strategies that
# produce mutable values, and for strategies that have been mapped/filtered
# by arbitrary user-provided functions.
@property
def has_reusable_values(self) -> Any:
return recursive_property(self, "has_reusable_values", True)
@property
def is_cacheable(self) -> Any:
"""
Whether it is safe to hold on to instances of this strategy in a cache.
See _STRATEGY_CACHE.
"""
return recursive_property(self, "is_cacheable", True)
def calc_is_cacheable(self, recur: RecurT) -> bool:
return True
def calc_is_empty(self, recur: RecurT) -> bool:
# Note: It is correct and significant that the default return value
# from calc_is_empty is False despite the default value for is_empty
# being true. The reason for this is that strategies should be treated
# as empty absent evidence to the contrary, but most basic strategies
# are trivially non-empty and it would be annoying to have to override
# this method to show that.
return False
def calc_has_reusable_values(self, recur: RecurT) -> bool:
return False
def example(self) -> Ex: # FIXME
"""Provide an example of the sort of value that this strategy generates.
This method is designed for use in a REPL, and will raise an error if
called from inside |@given| or a strategy definition. For serious use,
see |@composite| or |st.data|.
"""
if getattr(sys, "ps1", None) is None: # pragma: no branch
# The other branch *is* covered in cover/test_examples.py; but as that
# uses `pexpect` for an interactive session `coverage` doesn't see it.
warnings.warn(
"The `.example()` method is good for exploring strategies, but should "
"only be used interactively. We recommend using `@given` for tests - "
"it performs better, saves and replays failures to avoid flakiness, "
f"and reports minimal examples. (strategy: {self!r})",
NonInteractiveExampleWarning,
stacklevel=2,
)
context = _current_build_context.value
if context is not None:
if context.data is not None and context.data.depth > 0:
raise HypothesisException(
"Using example() inside a strategy definition is a bad "
"idea. Instead consider using hypothesis.strategies.builds() "
"or @hypothesis.strategies.composite to define your strategy."
" See https://hypothesis.readthedocs.io/en/latest/data.html"
"#hypothesis.strategies.builds or "
"https://hypothesis.readthedocs.io/en/latest/data.html"
"#composite-strategies for more details."
)
else:
raise HypothesisException(
"Using example() inside a test function is a bad "
"idea. Instead consider using hypothesis.strategies.data() "
"to draw more examples during testing. See "
"https://hypothesis.readthedocs.io/en/latest/data.html"
"#drawing-interactively-in-tests for more details."
)
try:
return self.__examples.pop()
except (AttributeError, IndexError):
self.__examples: list[Ex] = []
from hypothesis.core import given
# Note: this function has a weird name because it might appear in
# tracebacks, and we want users to know that they can ignore it.
@given(self)
@settings(
database=None,
# generate only a few examples at a time to avoid slow interactivity
# for large strategies. The overhead of @given is very small relative
# to generation, so a small batch size is fine.
max_examples=10,
deadline=None,
verbosity=Verbosity.quiet,
phases=(Phase.generate,),
suppress_health_check=list(HealthCheck),
)
def example_generating_inner_function(
ex: Ex, # type: ignore # mypy is overzealous in preventing covariant params
) -> None:
self.__examples.append(ex)
example_generating_inner_function()
shuffle(self.__examples)
return self.__examples.pop()
def map(self, pack: Callable[[Ex], T]) -> "SearchStrategy[T]":
"""Returns a new strategy which generates a value from this one, and
then returns ``pack(value)``. For example, ``integers().map(str)``
could generate ``str(5)`` == ``"5"``.
"""
if is_identity_function(pack):
return self # type: ignore # Mypy has no way to know that `Ex == T`
return MappedStrategy(self, pack=pack)
def flatmap(
self, expand: Callable[[Ex], "SearchStrategy[T]"]
) -> "SearchStrategy[T]": # FIXME
"""Old syntax for a special case of |@composite|:
.. code-block:: python
@st.composite
def flatmap_like(draw, base_strategy, expand):
value = draw(base_strategy)
new_strategy = expand(value)
return draw(new_strategy)
We find that the greater readability of |@composite| usually outweighs
the verbosity, with a few exceptions for simple cases or recipes like
``from_type(type).flatmap(from_type)`` ("pick a type, get a strategy for
any instance of that type, and then generate one of those").
"""
from hypothesis.strategies._internal.flatmapped import FlatMapStrategy
return FlatMapStrategy(self, expand=expand)
# Note that we previously had condition extracted to a type alias as
# PredicateT. However, that was only useful when not specifying a relationship
# between the generic Ts and some other function param / return value.
# If we do want to - like here, where we want to say that the Ex arg to condition
# is of the same type as the strategy's Ex - then you need to write out the
# entire Callable[[Ex], Any] expression rather than use a type alias.
# TypeAlias is *not* simply a macro that inserts the text. TypeAlias will not
# reference the local TypeVar context.
def filter(self, condition: Callable[[Ex], Any]) -> "SearchStrategy[Ex]":
"""Returns a new strategy that generates values from this strategy
which satisfy the provided condition.
Note that if the condition is too hard to satisfy this might result
in your tests failing with an Unsatisfiable exception.
A basic version of the filtering logic would look something like:
.. code-block:: python
@st.composite
def filter_like(draw, strategy, condition):
for _ in range(3):
value = draw(strategy)
if condition(value):
return value
assume(False)
"""
return FilteredStrategy(self, conditions=(condition,))
@property
def branches(self) -> Sequence["SearchStrategy[Ex]"]:
return [self]
def __or__(self, other: "SearchStrategy[T]") -> "SearchStrategy[Ex | T]":
"""Return a strategy which produces values by randomly drawing from one
of this strategy or the other strategy.
This method is part of the public API.
"""
if not isinstance(other, SearchStrategy):
raise ValueError(f"Cannot | a SearchStrategy with {other!r}")
# Unwrap explicitly or'd strategies. This turns the
# common case of e.g. st.integers() | st.integers() | st.integers() from
#
# one_of(one_of(integers(), integers()), integers())
#
# into
#
# one_of(integers(), integers(), integers())
#
# This is purely an aesthetic unwrapping, for e.g. reprs. In practice
# we use .branches / .element_strategies to get the list of possible
# strategies, so this unwrapping is *not* necessary for correctness.
strategies: list[SearchStrategy] = []
strategies.extend(
self.original_strategies if isinstance(self, OneOfStrategy) else [self]
)
strategies.extend(
other.original_strategies if isinstance(other, OneOfStrategy) else [other]
)
return OneOfStrategy(strategies)
def __bool__(self) -> bool:
warnings.warn(
f"bool({self!r}) is always True, did you mean to draw a value?",
HypothesisWarning,
stacklevel=2,
)
return True
def validate(self) -> None:
"""Throw an exception if the strategy is not valid.
Strategies should implement ``do_validate``, which is called by this
method. They should not override ``validate``.
This can happen due to invalid arguments, or lazy construction.
"""
thread_id = threading.get_ident()
if self.validate_called.get(thread_id, False):
return
# we need to set validate_called before calling do_validate, for
# recursive / deferred strategies. But if a thread switches after
# validate_called but before do_validate, we might have a strategy
# which does weird things like drawing when do_validate would error but
# its params are technically valid (e.g. a param was passed as 1.0
# instead of 1) and get into weird internal states.
#
# There are two ways to fix this.
# (1) The first is a per-strategy lock around do_validate. Even though we
# expect near-zero lock contention, this still adds the lock overhead.
# (2) The second is allowing concurrent .validate calls. Since validation
# is (assumed to be) deterministic, both threads will produce the same
# end state, so the validation order or race conditions does not matter.
#
# In order to avoid the lock overhead of (1), we use (2) here. See also
# discussion in https://github.com/HypothesisWorks/hypothesis/pull/4473.
try:
self.validate_called[thread_id] = True
self.do_validate()
self.is_empty
self.has_reusable_values
except Exception:
self.validate_called[thread_id] = False
raise
@property
def class_label(self) -> int:
cls = self.__class__
try:
return cls.LABELS[cls]
except KeyError:
pass
result = calc_label_from_cls(cls)
cls.LABELS[cls] = result
return result
@property
def label(self) -> int:
if isinstance((label := self.__label), int):
# avoid locking if we've already completely computed the label.
return label
with label_lock:
if self.__label is calculating:
return 0
self.__label = calculating
self.__label = self.calc_label()
return self.__label
def calc_label(self) -> int:
return self.class_label
def do_validate(self) -> None:
pass
def do_draw(self, data: ConjectureData) -> Ex:
raise NotImplementedError(f"{type(self).__name__}.do_draw")
def _is_hashable(value: object) -> tuple[bool, int | None]:
# hashing can be expensive; return the hash value if we compute it, so that
# callers don't have to recompute.
try:
return (True, hash(value))
except TypeError:
return (False, None)
def is_hashable(value: object) -> bool:
return _is_hashable(value)[0]
| SearchStrategy |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 8837,
"end": 8924
} | class ____(BaseGroupingComponent[str]):
id: str = "value"
| ErrorValueGroupingComponent |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 48687,
"end": 49196
} | class ____(PrefectFilterBaseModel):
"""Filter by `Log.flow_run_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of flow run IDs to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Log.flow_run_id.in_(self.any_))
return filters
| LogFilterFlowRunId |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 17927,
"end": 19909
} | class ____(test_util.TensorFlowTestCase):
def testRHSDispatchingAndErrorRaising(self):
if context.executing_eagerly():
error = ValueError
error_message = r"Attempt to convert a value .* with an unsupported type"
else:
error = TypeError
error_message = r"Failed to convert elements of .* to Tensor"
class RHSReturnsTrue:
def __radd__(self, other):
return True
def __rmod__(self, other):
return False
a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsTrue()
self.assertEqual(a, True)
a = _get_weak_tensor(5, dtype=dtypes.int32) + RHSReturnsTrue()
self.assertEqual(a, True)
a = array_ops.ones([1], dtype=dtypes.float32) % RHSReturnsTrue()
self.assertEqual(a, False)
a = _get_weak_tensor(5, dtype=dtypes.float32) % RHSReturnsTrue()
self.assertEqual(a, False)
class RHSRaisesError:
def __radd__(self, other):
raise TypeError("RHS not implemented")
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + RHSRaisesError()
self.evaluate(a)
with self.assertRaisesRegex(error, error_message):
a = _get_weak_tensor([1], dtype=dtypes.int32) + RHSRaisesError()
self.evaluate(a)
class RHSReturnsNotImplemented:
def __radd__(self, other):
return NotImplemented
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsNotImplemented()
self.evaluate(a)
a = _get_weak_tensor([1], dtype=dtypes.int32) + RHSReturnsNotImplemented()
self.evaluate(a)
class RHSNotImplemented:
pass
with self.assertRaisesRegex(error, error_message):
a = array_ops.ones([1], dtype=dtypes.int32) + RHSNotImplemented()
self.evaluate(a)
a = _get_weak_tensor([1], dtype=dtypes.int32) + RHSNotImplemented()
self.evaluate(a)
@test_util.run_all_in_graph_and_eager_modes
| BinaryOpsTest |
python | ray-project__ray | rllib/connectors/agent/env_sampling.py | {
"start": 299,
"end": 964
} | class ____(AgentConnector):
def __init__(self, ctx: ConnectorContext, sign=False, limit=None):
super().__init__(ctx)
self.observation_space = ctx.observation_space
def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
# EnvSamplingAgentConnector is a no-op connector.
return ac_data
def to_state(self):
return EnvSamplingAgentConnector.__name__, {}
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return EnvSamplingAgentConnector(ctx, **params)
register_connector(EnvSamplingAgentConnector.__name__, EnvSamplingAgentConnector)
| EnvSamplingAgentConnector |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py | {
"start": 42885,
"end": 47504
} | class ____(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,
feed_dict=None):
np_ans = (x != zero).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
@test_util.run_deprecated_v1
def testBoolReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape([2, 3, 5,
7]).astype(
np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testDegenerate(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y, np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712
with self.cached_session() as sess:
v = math_ops.count_nonzero(constant_op.constant(["test"]))
self.assertAllClose(self.evaluate(v), 1)
@test_util.run_deprecated_v1
def testStringReduce1D(self):
# Create a 1D array of strings
x = np.asarray(["", "", "a", "", "", "b"])
self._compare(x, None, keepdims=False, zero=np.str_(""))
self._compare(x, [], keepdims=False, zero=np.str_(""))
self._compare(x, [0], keepdims=False, zero=np.str_(""))
self._compare(x, None, keepdims=True, zero=np.str_(""))
self._compare(x, [], keepdims=True, zero=np.str_(""))
self._compare(x, [0], keepdims=True, zero=np.str_(""))
@test_util.run_deprecated_v1
def testStringReduce2D(self):
# Create a 2D array of strings
x = np.asarray([["", "", "a", "", "", "b"],
["", "c", "", "d", "", ""],
["e", "", "f", "", "", ""]])
self._compare(x, None, keepdims=False, zero=np.str_(""))
self._compare(x, [], keepdims=False, zero=np.str_(""))
self._compare(x, [0], keepdims=False, zero=np.str_(""))
self._compare(x, [1], keepdims=False, zero=np.str_(""))
self._compare(x, [0, 1], keepdims=False, zero=np.str_(""))
self._compare(x, None, keepdims=True, zero=np.str_(""))
self._compare(x, [], keepdims=True, zero=np.str_(""))
self._compare(x, [0], keepdims=True, zero=np.str_(""))
self._compare(x, [0, 1], keepdims=True, zero=np.str_(""))
if __name__ == "__main__":
test.main()
| CountNonzeroReductionTest |
python | django__django | tests/auth_tests/models/with_foreign_key.py | {
"start": 111,
"end": 229
} | class ____(models.Model):
email = models.EmailField(verbose_name="email address", max_length=255, unique=True)
| Email |
python | spyder-ide__spyder | spyder/plugins/outlineexplorer/plugin.py | {
"start": 537,
"end": 6591
} | class ____(SpyderDockablePlugin):
NAME = 'outline_explorer'
CONF_SECTION = 'outline_explorer'
REQUIRES = [Plugins.Completions, Plugins.Editor]
OPTIONAL = []
CONF_FILE = False
WIDGET_CLASS = OutlineExplorerWidget
# ---- SpyderDockablePlugin API
# -------------------------------------------------------------------------
@staticmethod
def get_name() -> str:
"""Return widget title."""
return _('Outline Explorer')
@staticmethod
def get_description() -> str:
"""Return the description of the outline explorer widget."""
return _("Explore functions, classes and methods in open files. Note "
"that if you disable the 'Completion and linting' plugin, "
"this one won't work.")
@classmethod
def get_icon(cls):
"""Return the outline explorer icon."""
return cls.create_icon('outline_explorer')
def on_initialize(self):
if self.main:
self.main.restore_scrollbar_position.connect(
self._restore_scrollbar_position)
self.sig_mainwindow_state_changed.connect(
self._on_mainwindow_state_changed)
@on_plugin_available(plugin=Plugins.Completions)
def on_completions_available(self):
completions = self.get_plugin(Plugins.Completions)
completions.sig_language_completions_available.connect(
self.start_symbol_services)
completions.sig_stop_completions.connect(
self.stop_symbol_services)
@on_plugin_available(plugin=Plugins.Editor)
def on_editor_available(self):
widget = self.get_widget()
editor = self.get_plugin(Plugins.Editor)
editor.sig_open_files_finished.connect(
self.update_all_editors)
widget.edit_goto.connect(editor.load_edit_goto)
widget.edit.connect(editor.load_edit)
@on_plugin_teardown(plugin=Plugins.Completions)
def on_completions_teardown(self):
completions = self.get_plugin(Plugins.Completions)
completions.sig_language_completions_available.disconnect(
self.start_symbol_services)
completions.sig_stop_completions.disconnect(
self.stop_symbol_services)
@on_plugin_teardown(plugin=Plugins.Editor)
def on_editor_teardown(self):
widget = self.get_widget()
editor = self.get_plugin(Plugins.Editor)
editor.sig_open_files_finished.disconnect(
self.update_all_editors)
widget.edit_goto.disconnect(editor.load_edit_goto)
widget.edit.disconnect(editor.load_edit)
# ----- Private API
# -------------------------------------------------------------------------
@Slot(object)
def _on_mainwindow_state_changed(self, window_state):
"""Actions to take when the main window has changed its state."""
if window_state == Qt.WindowMinimized:
# There's no need to update the treewidget when the plugin is
# minimized.
self.get_widget().change_tree_visibility(False)
else:
self.get_widget().change_tree_visibility(True)
def _restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
scrollbar_pos = self.get_conf('scrollbar_position', None)
explorer = self.get_widget()
if scrollbar_pos is not None:
explorer.treewidget.set_scrollbar_position(scrollbar_pos)
def _set_toggle_view_action_state(self):
"""Set state of the toogle view action."""
self.get_widget().blockSignals(True)
if self.get_widget().is_visible:
self.get_widget().toggle_view_action.setChecked(True)
else:
self.get_widget().toggle_view_action.setChecked(False)
self.get_widget().blockSignals(False)
# ----- Public API
# -------------------------------------------------------------------------
@Slot(dict, str)
def start_symbol_services(self, capabilities, language):
"""Enable LSP symbols functionality."""
explorer = self.get_widget()
symbol_provider = capabilities.get('documentSymbolProvider', False)
if symbol_provider:
explorer.start_symbol_services(language)
def stop_symbol_services(self, language):
"""Disable LSP symbols functionality."""
explorer = self.get_widget()
explorer.stop_symbol_services(language)
def update_all_editors(self):
"""Update all editors with an associated LSP server."""
explorer = self.get_widget()
explorer.update_all_editors()
def get_supported_languages(self):
"""List of languages with symbols support."""
return self.get_widget().get_supported_languages()
def dock_with_maximized_editor(self):
"""
Actions to take when the plugin is docked next to the editor when the
latter is maximized.
"""
self.get_widget().in_maximized_editor = True
if self.get_conf('show_with_maximized_editor'):
self.main.addDockWidget(Qt.LeftDockWidgetArea, self.dockwidget)
self.dockwidget.show()
# This width is enough to show all buttons in the main toolbar
max_width = 360
# Give an appropiate width to the Outline
editor = self.get_plugin(Plugins.Editor)
self.main.resizeDocks(
[editor.dockwidget, self.dockwidget],
# We set main_window.width() // 7 as the min width for the
# Outline because it's not too wide for small screens.
[self.main.width(), min(self.main.width() // 7, max_width)],
Qt.Horizontal
)
self._set_toggle_view_action_state()
def hide_from_maximized_editor(self):
"""
Actions to take when the plugin is hidden after the editor is
unmaximized.
"""
self.get_widget().in_maximized_editor = False
self._set_toggle_view_action_state()
| OutlineExplorer |
python | encode__httpx | httpx/_decoders.py | {
"start": 9421,
"end": 9806
} | class ____:
"""
Handles incrementally decoding bytes into text
"""
def __init__(self, encoding: str = "utf-8") -> None:
self.decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
def decode(self, data: bytes) -> str:
return self.decoder.decode(data)
def flush(self) -> str:
return self.decoder.decode(b"", True)
| TextDecoder |
python | getsentry__sentry | src/sentry/types/ratelimit.py | {
"start": 730,
"end": 866
} | class ____(Enum):
NOT_LIMITED = "not_limited"
CONCURRENT = "concurrent"
FIXED_WINDOW = "fixed_window"
@dataclass
| RateLimitType |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/base.py | {
"start": 928,
"end": 2139
} | class ____:
def __init__(
self, custom_parsers: Optional[Dict[FileType, BaseReader]], custom_folder: str
):
self.custom_parsers = custom_parsers or {}
self.custom_folder = custom_folder
def __remove_custom_file(self, file_path: str):
try:
if os.path.exists(file_path):
os.remove(file_path)
except Exception as e:
print(f"Error removing file {file_path}: {e}")
def process_with_custom_parser(
self, file_type: FileType, file_content: bytes, extension: str
) -> Optional[str]:
if file_type not in self.custom_parsers:
return None
file_name = f"{uuid.uuid4().hex}.{extension}"
custom_file_path = os.path.join(self.custom_folder, file_name)
with open(custom_file_path, "wb") as f:
f.write(file_content)
try:
markdown_text = "\n".join(
doc.text
for doc in self.custom_parsers[file_type].load_data(
file_path=custom_file_path
)
)
finally:
self.__remove_custom_file(custom_file_path)
return markdown_text
| CustomParserManager |
python | readthedocs__readthedocs.org | readthedocs/integrations/migrations/0005_change_default_integration_secret.py | {
"start": 150,
"end": 655
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("integrations", "0004_add_integration_secret"),
]
operations = [
migrations.AlterField(
model_name="integration",
name="secret",
field=models.CharField(
blank=True,
help_text="Secret used to validate the payload of the webhook",
max_length=255,
null=True,
),
),
]
| Migration |
python | facebookresearch__faiss | tests/test_referenced_objects.py | {
"start": 487,
"end": 2603
} | class ____(unittest.TestCase):
def test_IndexIVF(self):
quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFFlat(quantizer, d, 10)
index.train(xt)
index.add(xb)
del quantizer
gc.collect()
index.add(xb)
def test_count_refs(self):
quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFFlat(quantizer, d, 10)
refc1 = sys.getrefcount(quantizer)
del index
gc.collect()
refc2 = sys.getrefcount(quantizer)
assert refc2 == refc1 - 1
def test_IndexIVF_2(self):
index = faiss.IndexIVFFlat(faiss.IndexFlatL2(d), d, 10)
index.train(xt)
index.add(xb)
def test_IndexPreTransform(self):
ltrans = faiss.NormalizationTransform(d)
sub_index = faiss.IndexFlatL2(d)
index = faiss.IndexPreTransform(ltrans, sub_index)
index.add(xb)
del ltrans
gc.collect()
index.add(xb)
del sub_index
gc.collect()
index.add(xb)
def test_IndexPreTransform_2(self):
sub_index = faiss.IndexFlatL2(d)
index = faiss.IndexPreTransform(sub_index)
ltrans = faiss.NormalizationTransform(d)
index.prepend_transform(ltrans)
index.add(xb)
del ltrans
gc.collect()
index.add(xb)
del sub_index
gc.collect()
index.add(xb)
def test_IDMap(self):
sub_index = faiss.IndexFlatL2(d)
index = faiss.IndexIDMap(sub_index)
index.add_with_ids(xb, np.arange(len(xb), dtype='int64'))
del sub_index
gc.collect()
index.add_with_ids(xb, np.arange(len(xb), dtype='int64'))
def test_shards(self):
index = faiss.IndexShards(d)
for _i in range(3):
sub_index = faiss.IndexFlatL2(d)
sub_index.add(xb)
index.add_shard(sub_index)
gc.collect()
index.search(xb, 10)
dbin = 32
xtbin = np.random.randint(256, size=(100, int(dbin / 8))).astype('uint8')
xbbin = np.random.randint(256, size=(20, int(dbin / 8))).astype('uint8')
| TestReferenced |
python | pypa__warehouse | warehouse/oidc/forms/github.py | {
"start": 7310,
"end": 7367
} | class ____(GitHubPublisherBase):
pass
| GitHubPublisherForm |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 22638,
"end": 23863
} | class ____(Response):
"""
Response of models.add_or_update_metadata endpoint.
:param updated: Number of models updated (0 or 1)
:type updated: int
"""
_service = "models"
_action = "add_or_update_metadata"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of models updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(AddOrUpdateMetadataResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| AddOrUpdateMetadataResponse |
python | joke2k__faker | faker/providers/bank/nl_NL/__init__.py | {
"start": 42,
"end": 188
} | class ____(BankProvider):
"""Implement bank provider for ``nl_NL`` locale."""
bban_format = "????##########"
country_code = "NL"
| Provider |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_domains.py | {
"start": 1255,
"end": 8485
} | class ____(TestCase):
def setUp(self):
self.project = get(Project, slug="kong")
def test_https(self):
"""Make sure https is an admin-only attribute."""
form = DomainForm(
{"domain": "example.com", "canonical": True},
project=self.project,
)
self.assertTrue(form.is_valid())
domain = form.save()
self.assertTrue(domain.https)
form = DomainForm(
{
"domain": "example.com",
"canonical": True,
},
project=self.project,
)
self.assertFalse(form.is_valid())
def test_production_domain_not_allowed(self):
"""Make sure user can not enter production domain name."""
form = DomainForm(
{"domain": settings.PRODUCTION_DOMAIN},
project=self.project,
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["domain"][0],
f"{settings.PRODUCTION_DOMAIN} is not a valid domain.",
)
form2 = DomainForm(
{"domain": "test." + settings.PRODUCTION_DOMAIN},
project=self.project,
)
self.assertFalse(form2.is_valid())
self.assertEqual(
form2.errors["domain"][0],
f"{settings.PRODUCTION_DOMAIN} is not a valid domain.",
)
@override_settings(
RTD_RESTRICTED_DOMAINS=[
"readthedocs.org",
"readthedocs.io",
"readthedocs.build",
],
)
def test_restricted_domains_not_allowed(self):
"""Make sure user can not enter public domain name."""
invalid_domains = [
"readthedocs.org",
"test.readthedocs.org",
"app.readthedocs.org",
"test.app.readthedocs.org",
"readthedocs.io",
"test.readthedocs.io",
"docs.readthedocs.io",
"test.docs.readthedocs.io",
"readthedocs.build",
"test.readthedocs.build",
"docs.readthedocs.build",
"test.docs.readthedocs.build",
# Trailing white spaces, sneaky.
"https:// readthedocs.org /",
]
for domain in invalid_domains:
form = DomainForm(
{"domain": domain},
project=self.project,
)
assert not form.is_valid(), domain
assert "is not a valid domain." in form.errors["domain"][0]
def test_domain_with_path(self):
form = DomainForm(
{"domain": "domain.com/foo/bar"},
project=self.project,
)
self.assertTrue(form.is_valid())
domain = form.save()
self.assertEqual(domain.domain, "domain.com")
def test_valid_domains(self):
domains = [
"python.org",
"a.io",
"a.e.i.o.org",
"my.domain.com.edu",
"my-domain.fav",
]
for domain in domains:
form = DomainForm(
{"domain": domain},
project=self.project,
)
self.assertTrue(form.is_valid(), domain)
def test_invalid_domains(self):
domains = [
"python..org",
"****.foo.com",
"domain",
"domain.com.",
"My domain.org",
"i.o",
"[special].com",
"some_thing.org",
"invalid-.com",
"1.1.1.1",
"1.23.45.67",
"127.0.0.1",
"127.0.0.10",
"[1.2.3.4.com",
]
for domain in domains:
form = DomainForm(
{"domain": domain},
project=self.project,
)
self.assertFalse(form.is_valid(), domain)
def test_canonical_change(self):
"""Make sure canonical can be properly changed."""
form = DomainForm(
{"domain": "example.com", "canonical": True},
project=self.project,
)
self.assertTrue(form.is_valid())
domain = form.save()
self.assertEqual(domain.domain, "example.com")
self.assertTrue(form.is_valid())
domain = form.save()
self.assertEqual(domain.domain, "example.com")
form = DomainForm(
{"domain": "example2.com", "canonical": True},
project=self.project,
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["canonical"][0], "Only one domain can be canonical at a time."
)
form = DomainForm(
{"canonical": False},
project=self.project,
instance=domain,
)
self.assertTrue(form.is_valid())
domain = form.save()
self.assertEqual(domain.domain, "example.com")
self.assertFalse(domain.canonical)
def test_allow_change_http_to_https(self):
domain = get(Domain, domain="docs.example.com", https=False)
form = DomainForm(
{"https": True},
project=self.project,
instance=domain,
)
self.assertTrue(form.is_valid())
domain = form.save()
self.assertTrue(domain.https)
def test_dont_allow_changin_https_to_http(self):
domain = get(Domain, domain="docs.example.com", https=True)
form = DomainForm(
{"https": False},
project=self.project,
instance=domain,
)
self.assertTrue(form.is_valid())
domain = form.save()
self.assertTrue(domain.https)
@override_settings(
RTD_DEFAULT_FEATURES=dict(
[RTDProductFeature(type=TYPE_CNAME, value=2).to_item()]
),
)
def test_domains_limit(self):
feature = get_feature(self.project, TYPE_CNAME)
form = DomainForm(
{
"domain": "docs.user.example.com",
"canonical": True,
},
project=self.project,
)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.domains.all().count(), 1)
form = DomainForm(
{
"domain": "docs.dev.example.com",
"canonical": False,
},
project=self.project,
)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.domains.all().count(), 2)
# Creating the third (3) domain should fail the validation form
form = DomainForm(
{
"domain": "docs.customer.example.com",
"canonical": False,
},
project=self.project,
)
self.assertFalse(form.is_valid())
msg = (
f"This project has reached the limit of {feature.value} domains. "
"Consider removing unused domains."
)
if settings.RTD_ALLOW_ORGANIZATIONS:
msg = (
f"Your organization has reached the limit of {feature.value} domains. "
"Consider removing unused domains or upgrading your plan."
)
self.assertEqual(form.errors["__all__"][0], msg)
| FormTests |
python | numpy__numpy | numpy/lib/_stride_tricks_impl.py | {
"start": 355,
"end": 19115
} | class ____:
"""Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
@set_module("numpy.lib.stride_tricks")
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
"""
Create a view into the array with the given shape and strides.
.. warning:: This function has to be used with extreme care, see notes.
Parameters
----------
x : ndarray
Array to create a new.
shape : sequence of int, optional
The shape of the new array. Defaults to ``x.shape``.
strides : sequence of int, optional
The strides of the new array. Defaults to ``x.strides``.
subok : bool, optional
If True, subclasses are preserved.
writeable : bool, optional
If set to False, the returned array will always be readonly.
Otherwise it will be writable if the original array was. It
is advisable to set this to False if possible (see Notes).
Returns
-------
view : ndarray
See also
--------
broadcast_to : broadcast an array to a given shape.
reshape : reshape an array.
lib.stride_tricks.sliding_window_view :
userfriendly and safe function for a creation of sliding window views.
Notes
-----
``as_strided`` creates a view into the array given the exact strides
and shape. This means it manipulates the internal data structure of
ndarray and, if done incorrectly, the array elements can point to
invalid memory and can corrupt results or crash your program.
It is advisable to always use the original ``x.strides`` when
calculating new strides to avoid reliance on a contiguous memory
layout.
Furthermore, arrays created with this function often contain self
overlapping memory, so that two elements are identical.
Vectorized write operations on such arrays will typically be
unpredictable. They may even give different results for small, large,
or transposed arrays.
Since writing to these arrays has to be tested and done with great
care, you may want to use ``writeable=False`` to avoid accidental write
operations.
For these reasons it is advisable to avoid ``as_strided`` when
possible.
"""
# first convert input to array, possibly keeping subclass
x = np.array(x, copy=None, subok=subok)
interface = dict(x.__array_interface__)
if shape is not None:
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
# The route via `__interface__` does not preserve structured
# dtypes. Since dtype should remain unchanged, we set it explicitly.
array.dtype = x.dtype
view = _maybe_view_as_subclass(x, array)
if view.flags.writeable and not writeable:
view.flags.writeable = False
return view
def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
subok=None, writeable=None):
return (x,)
@array_function_dispatch(
_sliding_window_view_dispatcher, module="numpy.lib.stride_tricks"
)
def sliding_window_view(x, window_shape, axis=None, *,
subok=False, writeable=False):
"""
Create a sliding window view into the array with the given window shape.
Also known as rolling or moving window, the window slides across all
dimensions of the array and extracts subsets of the array at all window
positions.
.. versionadded:: 1.20.0
Parameters
----------
x : array_like
Array to create the sliding window view from.
window_shape : int or tuple of int
Size of window over each axis that takes part in the sliding window.
If `axis` is not present, must have same length as the number of input
array dimensions. Single integers `i` are treated as if they were the
tuple `(i,)`.
axis : int or tuple of int, optional
Axis or axes along which the sliding window is applied.
By default, the sliding window is applied to all axes and
`window_shape[i]` will refer to axis `i` of `x`.
If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
the axis `axis[i]` of `x`.
Single integers `i` are treated as if they were the tuple `(i,)`.
subok : bool, optional
If True, sub-classes will be passed-through, otherwise the returned
array will be forced to be a base-class array (default).
writeable : bool, optional
When true, allow writing to the returned view. The default is false,
as this should be used with caution: the returned view contains the
same memory location multiple times, so writing to one location will
cause others to change.
Returns
-------
view : ndarray
Sliding window view of the array. The sliding window dimensions are
inserted at the end, and the original dimensions are trimmed as
required by the size of the sliding window.
That is, ``view.shape = x_shape_trimmed + window_shape``, where
``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
than the corresponding window size.
See Also
--------
lib.stride_tricks.as_strided: A lower-level and less safe routine for
creating arbitrary views from custom shape and strides.
broadcast_to: broadcast an array to a given shape.
Notes
-----
.. warning::
This function creates views with overlapping memory. When
``writeable=True``, writing to the view will modify the original array
and may affect multiple view positions. See the examples below and
:doc:`this guide </user/basics.copies>`
about the difference between copies and views.
For many applications using a sliding window view can be convenient, but
potentially very slow. Often specialized solutions exist, for example:
- `scipy.signal.fftconvolve`
- filtering functions in `scipy.ndimage`
- moving window functions provided by
`bottleneck <https://github.com/pydata/bottleneck>`_.
As a rough estimate, a sliding window approach with an input size of `N`
and a window size of `W` will scale as `O(N*W)` where frequently a special
algorithm can achieve `O(N)`. That means that the sliding window variant
for a window size of 100 can be a 100 times slower than a more specialized
version.
Nevertheless, for small window sizes, when no custom algorithm exists, or
as a prototyping and developing tool, this function can be a good solution.
Examples
--------
>>> import numpy as np
>>> from numpy.lib.stride_tricks import sliding_window_view
>>> x = np.arange(6)
>>> x.shape
(6,)
>>> v = sliding_window_view(x, 3)
>>> v.shape
(4, 3)
>>> v
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
This also works in more dimensions, e.g.
>>> i, j = np.ogrid[:3, :4]
>>> x = 10*i + j
>>> x.shape
(3, 4)
>>> x
array([[ 0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]])
>>> shape = (2,2)
>>> v = sliding_window_view(x, shape)
>>> v.shape
(2, 3, 2, 2)
>>> v
array([[[[ 0, 1],
[10, 11]],
[[ 1, 2],
[11, 12]],
[[ 2, 3],
[12, 13]]],
[[[10, 11],
[20, 21]],
[[11, 12],
[21, 22]],
[[12, 13],
[22, 23]]]])
The axis can be specified explicitly:
>>> v = sliding_window_view(x, 3, 0)
>>> v.shape
(1, 4, 3)
>>> v
array([[[ 0, 10, 20],
[ 1, 11, 21],
[ 2, 12, 22],
[ 3, 13, 23]]])
The same axis can be used several times. In that case, every use reduces
the corresponding original dimension:
>>> v = sliding_window_view(x, (2, 3), (1, 1))
>>> v.shape
(3, 1, 2, 3)
>>> v
array([[[[ 0, 1, 2],
[ 1, 2, 3]]],
[[[10, 11, 12],
[11, 12, 13]]],
[[[20, 21, 22],
[21, 22, 23]]]])
Combining with stepped slicing (`::step`), this can be used to take sliding
views which skip elements:
>>> x = np.arange(7)
>>> sliding_window_view(x, 5)[:, ::2]
array([[0, 2, 4],
[1, 3, 5],
[2, 4, 6]])
or views which move by multiple elements
>>> x = np.arange(7)
>>> sliding_window_view(x, 3)[::2, :]
array([[0, 1, 2],
[2, 3, 4],
[4, 5, 6]])
A common application of `sliding_window_view` is the calculation of running
statistics. The simplest example is the
`moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
>>> x = np.arange(6)
>>> x.shape
(6,)
>>> v = sliding_window_view(x, 3)
>>> v.shape
(4, 3)
>>> v
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
>>> moving_average = v.mean(axis=-1)
>>> moving_average
array([1., 2., 3., 4.])
The two examples below demonstrate the effect of ``writeable=True``.
Creating a view with the default ``writeable=False`` and then writing to
it raises an error.
>>> v = sliding_window_view(x, 3)
>>> v[0,1] = 10
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
Creating a view with ``writeable=True`` and then writing to it changes
the original array and multiple view positions.
>>> x = np.arange(6) # reset x for the second example
>>> v = sliding_window_view(x, 3, writeable=True)
>>> v[0,1] = 10
>>> x
array([ 0, 10, 2, 3, 4, 5])
>>> v
array([[ 0, 10, 2],
[10, 2, 3],
[ 2, 3, 4],
[ 3, 4, 5]])
Note that a sliding window approach is often **not** optimal (see Notes).
"""
window_shape = (tuple(window_shape)
if np.iterable(window_shape)
else (window_shape,))
# first convert input to array, possibly keeping subclass
x = np.array(x, copy=None, subok=subok)
window_shape_array = np.array(window_shape)
if np.any(window_shape_array < 0):
raise ValueError('`window_shape` cannot contain negative values')
if axis is None:
axis = tuple(range(x.ndim))
if len(window_shape) != len(axis):
raise ValueError(f'Since axis is `None`, must provide '
f'window_shape for all dimensions of `x`; '
f'got {len(window_shape)} window_shape elements '
f'and `x.ndim` is {x.ndim}.')
else:
axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
if len(window_shape) != len(axis):
raise ValueError(f'Must provide matching length window_shape and '
f'axis; got {len(window_shape)} window_shape '
f'elements and {len(axis)} axes elements.')
out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
# note: same axis can be windowed repeatedly
x_shape_trimmed = list(x.shape)
for ax, dim in zip(axis, window_shape):
if x_shape_trimmed[ax] < dim:
raise ValueError(
'window shape cannot be larger than input array shape')
x_shape_trimmed[ax] -= dim - 1
out_shape = tuple(x_shape_trimmed) + window_shape
return as_strided(x, strides=out_strides, shape=out_shape,
subok=subok, writeable=writeable)
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=None, subok=subok)
if not shape and array.shape:
raise ValueError('cannot broadcast a non-scalar to a scalar array')
if any(size < 0 for size in shape):
raise ValueError('all elements of broadcast shape must be non-'
'negative')
extras = []
it = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
op_flags=['readonly'], itershape=shape, order='C')
with it:
# never really has writebackifcopy semantics
broadcast = it.itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
# In a future version this will go away
if not readonly and array.flags._writeable_no_warn:
result.flags.writeable = True
result.flags._warn_on_write = True
return result
def _broadcast_to_dispatcher(array, shape, subok=None):
return (array,)
@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple or int
The shape of the desired array. A single integer ``i`` is interpreted
as ``(i,)``.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
See Also
--------
broadcast
broadcast_arrays
broadcast_shapes
Examples
--------
>>> import numpy as np
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
return _broadcast_to(array, shape, subok=subok, readonly=True)
def _broadcast_shape(*args):
"""Returns the shape of the arrays that would result from broadcasting the
supplied arrays against each other.
"""
# use the old-iterator because np.nditer does not handle size 0 arrays
# consistently
b = np.broadcast(*args[:64])
# unfortunately, it cannot handle 64 or more arguments directly
for pos in range(64, len(args), 63):
# ironically, np.broadcast does not properly handle np.broadcast
# objects (it treats them as scalars)
# use broadcasting to avoid allocating the full array
b = broadcast_to(0, b.shape)
b = np.broadcast(b, *args[pos:(pos + 63)])
return b.shape
_size0_dtype = np.dtype([])
@set_module('numpy')
def broadcast_shapes(*args):
"""
Broadcast the input shapes into a single shape.
:ref:`Learn more about broadcasting here <basics.broadcasting>`.
.. versionadded:: 1.20.0
Parameters
----------
*args : tuples of ints, or ints
The shapes to be broadcast against each other.
Returns
-------
tuple
Broadcasted shape.
Raises
------
ValueError
If the shapes are not compatible and cannot be broadcast according
to NumPy's broadcasting rules.
See Also
--------
broadcast
broadcast_arrays
broadcast_to
Examples
--------
>>> import numpy as np
>>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
(3, 2)
>>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
(5, 6, 7)
"""
arrays = [np.empty(x, dtype=_size0_dtype) for x in args]
return _broadcast_shape(*arrays)
def _broadcast_arrays_dispatcher(*args, subok=None):
return args
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, subok=False):
"""
Broadcast any number of arrays against each other.
Parameters
----------
*args : array_likes
The arrays to broadcast.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned arrays will be forced to be a base-class array (default).
Returns
-------
broadcasted : tuple of arrays
These arrays are views on the original arrays. They are typically
not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location. If you need
to write to the arrays, make copies first. While you can set the
``writable`` flag True, writing to a single output value may end up
changing more than one location in the output array.
.. deprecated:: 1.17
The output is currently marked so that if written to, a deprecation
warning will be emitted. A future version will set the
``writable`` flag False so writing to it will raise an error.
See Also
--------
broadcast
broadcast_to
broadcast_shapes
Examples
--------
>>> import numpy as np
>>> x = np.array([[1,2,3]])
>>> y = np.array([[4],[5]])
>>> np.broadcast_arrays(x, y)
(array([[1, 2, 3],
[1, 2, 3]]),
array([[4, 4, 4],
[5, 5, 5]]))
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
[1, 2, 3]]),
array([[4, 4, 4],
[5, 5, 5]])]
"""
# nditer is not used here to avoid the limit of 64 arrays.
# Otherwise, something like the following one-liner would suffice:
# return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
# order='C').itviews
args = [np.array(_m, copy=None, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
result = [array if array.shape == shape
else _broadcast_to(array, shape, subok=subok, readonly=False)
for array in args]
return tuple(result)
| DummyArray |
python | RaRe-Technologies__gensim | gensim/test/test_tfidfmodel.py | {
"start": 1014,
"end": 19533
} | class ____(unittest.TestCase):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
def test_transform(self):
# create the transformation model
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
self.assertTrue(np.allclose(transformed, expected))
def test_init(self):
# create the transformation model by analyzing a corpus
# uses the global `corpus`!
model1 = tfidfmodel.TfidfModel(common_corpus)
dfs = common_dictionary.dfs
# make sure the dfs<->idfs transformation works
self.assertEqual(model1.dfs, dfs)
self.assertEqual(model1.idfs, tfidfmodel.precompute_idfs(model1.wglobal, dfs, len(common_corpus)))
# create the transformation model by directly supplying a term->docfreq
# mapping from the global var `dictionary`.
model2 = tfidfmodel.TfidfModel(dictionary=common_dictionary)
self.assertEqual(model1.idfs, model2.idfs)
def test_persistence(self):
# Test persistence without using `smartirs`
fname = get_tmpfile('gensim_models.tst')
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence with using `smartirs`
fname = get_tmpfile('gensim_models_smartirs.tst')
model = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence between Gensim v3.2.0 and current model.
model3 = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
self.assertTrue(np.allclose(model3[[]], model4[[]])) # try projecting an empty vector
# Test persistence with using pivoted normalization
fname = get_tmpfile('gensim_models_smartirs.tst')
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
# Test persistence between Gensim v3.2.0 and pivoted normalization compressed model.
model3 = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
def test_persistence_compressed(self):
# Test persistence without using `smartirs`
fname = get_tmpfile('gensim_models.tst.gz')
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence with using `smartirs`
fname = get_tmpfile('gensim_models_smartirs.tst.gz')
model = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence between Gensim v3.2.0 and current compressed model.
model3 = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst.bz2'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
self.assertTrue(np.allclose(model3[[]], model4[[]])) # try projecting an empty vector
# Test persistence with using pivoted normalization
fname = get_tmpfile('gensim_models_smartirs.tst.gz')
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
# Test persistence between Gensim v3.2.0 and pivoted normalization compressed model.
model3 = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst.bz2'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
def test_consistency(self):
docs = [corpus[1], corpus[2]]
# Test if `ntc` yields the default docs.
model = tfidfmodel.TfidfModel(corpus, smartirs='nfc')
transformed_docs = [model[docs[0]], model[docs[1]]]
model = tfidfmodel.TfidfModel(corpus)
expected_docs = [model[docs[0]], model[docs[1]]]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Testing all the variations of `wlocal`
# tnn
model = tfidfmodel.TfidfModel(corpus, smartirs='tnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnn
model = tfidfmodel.TfidfModel(corpus, smartirs='nnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# lnn
model = tfidfmodel.TfidfModel(corpus, smartirs='lnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0), (8, 1.0)],
[(5, 2.0), (9, 1.0), (10, 1.0)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# dnn
model = tfidfmodel.TfidfModel(corpus, smartirs='dnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0), (8, 1.0)],
[(5, 2.0), (9, 1.0), (10, 1.0)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# ann
model = tfidfmodel.TfidfModel(corpus, smartirs='ann')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0), (8, 1.0)],
[(5, 1.0), (9, 0.75), (10, 0.75)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# bnn
model = tfidfmodel.TfidfModel(corpus, smartirs='bnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1)],
[(5, 1), (9, 1), (10, 1)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Lnn
model = tfidfmodel.TfidfModel(corpus, smartirs='Lnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0),
(7, 1.0), (8, 1.0)
],
[
(5, 1.4133901052), (9, 0.7066950526), (10, 0.7066950526)
]
]
# Testing all the variations of `glocal`
# nxn
model = tfidfmodel.TfidfModel(corpus, smartirs='nxn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nfn
model = tfidfmodel.TfidfModel(corpus, smartirs='nfn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 3.169925001442312), (4, 3.169925001442312), (5, 1.584962500721156), (6, 3.169925001442312),
(7, 3.169925001442312), (8, 2.169925001442312)
],
[
(5, 3.169925001442312), (9, 3.169925001442312), (10, 3.169925001442312)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# ntn
model = tfidfmodel.TfidfModel(corpus, smartirs='ntn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 3.321928094887362), (4, 3.321928094887362), (5, 1.736965594166206), (6, 3.321928094887362),
(7, 3.321928094887362), (8, 2.321928094887362)
],
[
(5, 3.473931188332412), (9, 3.321928094887362), (10, 3.321928094887362)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# npn
model = tfidfmodel.TfidfModel(corpus, smartirs='npn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 3.0), (4, 3.0), (5, 1.0), (6, 3.0),
(7, 3.0), (8, 1.8073549220576042)
],
[
(5, 2.0), (9, 3.0), (10, 3.0)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Testing all the variations of `normalize`
# nnx
model = tfidfmodel.TfidfModel(corpus, smartirs='nnx')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnc
model = tfidfmodel.TfidfModel(corpus, smartirs='nnc')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 0.4082482905), (4, 0.4082482905), (5, 0.4082482905), (6, 0.4082482905),
(7, 0.4082482905), (8, 0.4082482905)
],
[
(5, 0.81649658092772603), (9, 0.40824829046386302), (10, 0.40824829046386302)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
model = tfidfmodel.TfidfModel(corpus, wlocal=lambda x: x, wglobal=lambda x, y: x * x, smartirs='nnc')
transformed_docs = [model[docs[0]], model[docs[1]]]
model = tfidfmodel.TfidfModel(corpus, wlocal=lambda x: x * x, wglobal=lambda x, y: x, smartirs='nnc')
expected_docs = [model[docs[0]], model[docs[1]]]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnu
slope = 0.2
model = tfidfmodel.TfidfModel(corpus, smartirs='nnu', slope=slope)
transformed_docs = [model[docs[0]], model[docs[1]]]
average_unique_length = 1.0 * sum(len(set(text)) for text in texts) / len(texts)
vector_norms = [
(1.0 - slope) * average_unique_length + slope * 6.0,
(1.0 - slope) * average_unique_length + slope * 3.0,
]
expected_docs = [
[(termid, weight / vector_norms[0]) for termid, weight in docs[0]],
[(termid, weight / vector_norms[1]) for termid, weight in docs[1]],
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnb
slope = 0.2
model = tfidfmodel.TfidfModel(dictionary=dictionary, smartirs='nnb', slope=slope)
transformed_docs = [model[docs[0]], model[docs[1]]]
average_character_length = sum(len(word) + 1.0 for text in texts for word in text) / len(texts)
vector_norms = [
(1.0 - slope) * average_character_length + slope * 36.0,
(1.0 - slope) * average_character_length + slope * 25.0,
]
expected_docs = [
[(termid, weight / vector_norms[0]) for termid, weight in docs[0]],
[(termid, weight / vector_norms[1]) for termid, weight in docs[1]],
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
def test_pivoted_normalization(self):
docs = [corpus[1], corpus[2]]
# Test if slope=1 yields the default docs for pivoted normalization.
model = tfidfmodel.TfidfModel(self.corpus)
transformed_docs = [model[docs[0]], model[docs[1]]]
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
expected_docs = [model[docs[0]], model[docs[1]]]
self.assertTrue(np.allclose(sorted(transformed_docs[0]), sorted(expected_docs[0])))
self.assertTrue(np.allclose(sorted(transformed_docs[1]), sorted(expected_docs[1])))
# Test if pivoted model is consistent
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=0.5)
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(8, 0.8884910505493495), (7, 0.648974041227711), (6, 0.8884910505493495),
(5, 0.648974041227711), (4, 0.8884910505493495), (3, 0.8884910505493495)
],
[
(10, 0.8164965809277263), (9, 0.8164965809277263), (5, 1.6329931618554525)
]
]
self.assertTrue(np.allclose(sorted(transformed_docs[0]), sorted(expected_docs[0])))
self.assertTrue(np.allclose(sorted(transformed_docs[1]), sorted(expected_docs[1])))
def test_wlocal_wglobal(self):
def wlocal(tf):
assert isinstance(tf, np.ndarray)
return iter(tf + 1)
def wglobal(df, total_docs):
return 1
docs = [corpus[1], corpus[2]]
model = tfidfmodel.TfidfModel(corpus, wlocal=wlocal, wglobal=wglobal, normalize=False)
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(termid, weight + 1) for termid, weight in docs[0]],
[(termid, weight + 1) for termid, weight in docs[1]],
]
self.assertTrue(np.allclose(sorted(transformed_docs[0]), sorted(expected_docs[0])))
self.assertTrue(np.allclose(sorted(transformed_docs[1]), sorted(expected_docs[1])))
def test_backwards_compatibility(self):
model = tfidfmodel.TfidfModel.load(datapath('tfidf_model_3_2.tst'))
# attrs ensured by load method
attrs = ['pivot', 'slope', 'smartirs']
for a in attrs:
self.assertTrue(hasattr(model, a))
# __getitem__: assumes smartirs attr is present
self.assertEqual(len(model[corpus]), len(corpus))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestTfidfModel |
python | google__pytype | pytype/abstract/_instances.py | {
"start": 10493,
"end": 13118
} | class ____(_instance_base.Instance, mixin.PythonConstant):
"""Representation of Python 'tuple' objects."""
def __init__(
self, content: Sequence[cfg.Variable], ctx: "context.Context"
) -> None:
combined_content = ctx.convert.build_content(
content
) # pytype: disable=wrong-arg-types
class_params = {
name: ctx.convert.merge_classes(instance_param.data)
for name, instance_param in tuple(enumerate(content))
+ ((abstract_utils.T, combined_content),)
}
cls = _abstract.TupleClass(ctx.convert.tuple_type, class_params, ctx)
super().__init__(cls, ctx)
mixin.PythonConstant.init_mixin(self, content)
self._hash = None # memoized due to expensive computation
self.tuple_length = len(self.pyval)
self.merge_instance_type_parameter( # pytype: disable=wrong-arg-types
None, abstract_utils.T, combined_content
)
# set this to true when creating a function arg tuple
self.is_unpacked_function_args = False
def str_of_constant(self, printer: Callable[[_base.BaseValue], str]) -> str:
content = ", ".join(
" or ".join(_var_map(printer, val)) for val in self.pyval
)
if self.tuple_length == 1:
content += ","
return f"({content})"
def _unique_parameters(self) -> "list[cfg.Variable]":
parameters = super()._unique_parameters()
parameters.extend(self.pyval)
return parameters
def _is_recursive(self) -> bool:
"""True if the tuple contains itself."""
return any(any(x is self for x in e.data) for e in self.pyval)
def __eq__(self, other) -> bool:
if not isinstance(other, type(self)):
return NotImplemented
elif self.tuple_length != other.tuple_length:
return False
# If we find a tuple that contains itself, fall back to comparing hashes.
if self._is_recursive() or other._is_recursive():
return self._hash == other._hash
# Otherwise do an elementwise comparison.
return all(
e.data == other_e.data for e, other_e in zip(self.pyval, other.pyval)
)
def __hash__(self) -> int:
if self._hash is None:
# Descending into pyval would trigger infinite recursion in the case of a
# tuple containing itself, so we approximate the inner values with their
# full names.
approximate_hash = lambda var: tuple(v.full_name for v in var.data)
self._hash = hash(
(self.tuple_length,) + tuple(approximate_hash(e) for e in self.pyval)
)
return self._hash
def get_fullhash(self, seen: set[int] | None = None) -> int:
return _get_concrete_sequence_fullhash(self, seen)
| Tuple |
python | getsentry__sentry | src/sentry/snuba/metrics/extraction.py | {
"start": 12668,
"end": 19902
} | class ____(TypedDict):
#: A list of Metric Resource Identifiers (MRI) to apply tags to.
#:
#: Entries in this list can contain wildcards to match metrics with dynamic MRIs.
metrics: list[str]
#: A list of tags to add to the metric.
#:
#: Tags can be conditional, see `TagSpec` for configuration options. For this reason, it is
#: possible to list tag keys multiple times, each with different conditions. The first matching
#: condition will be applied.
tags: list[TagSpec]
def _check_event_type_transaction(
query: Sequence[QueryToken], is_top_level_call: bool = True
) -> bool:
transaction_filter = False
for token in query:
if isinstance(token, SearchFilter):
if token.key.name == "event.type" and token.value.value == "transaction":
transaction_filter = True
break
elif isinstance(token, ParenExpression):
contains_transaction = _check_event_type_transaction(
token.children, is_top_level_call=False
)
if contains_transaction:
transaction_filter = True
break
# Only if we are top level call, and we didn't find any transaction filter, we throw an exception, otherwise it
# means we are in a nested expression and not finding a transaction doesn't mean we never found it.
if is_top_level_call and not transaction_filter:
raise ValueError("event.type:transaction not found in the query")
return transaction_filter
def _transform_search_filter(search_filter: SearchFilter) -> SearchFilter:
# If we have `message:something` we convert it to `message:*something*` since we want to perform `contains` matching
# exactly how discover does it.
if search_filter.key.name == "message":
return SearchFilter(
key=SearchKey(name=search_filter.key.name),
operator=search_filter.operator,
value=SearchValue(raw_value=f"*{search_filter.value.raw_value}*"),
)
# If we have `transaction.status:unknown_error` we convert it to `transaction.status:unknown` since we need to be
# backward compatible.
if (
search_filter.key.name == "transaction.status"
and search_filter.value.raw_value == "unknown_error"
):
return SearchFilter(
key=SearchKey(name=search_filter.key.name),
operator=search_filter.operator,
value=SearchValue(raw_value="unknown"),
)
return search_filter
def _transform_search_query(query: Sequence[QueryToken]) -> list[QueryToken]:
transformed_query: list[QueryToken] = []
for token in query:
if isinstance(token, SearchFilter):
transformed_query.append(_transform_search_filter(token))
elif isinstance(token, ParenExpression):
transformed_query.append(ParenExpression(_transform_search_query(token.children)))
else:
transformed_query.append(token)
return transformed_query
@metrics.wraps("metrics.extraction.parse_search_query")
def parse_search_query(
query: str,
removed_blacklisted: bool = False,
force_transaction_event_type: bool = False,
) -> Sequence[QueryToken]:
"""
Parses a search query with the discover grammar and performs some transformations on the AST in order to account for
edge cases.
"""
tokens = event_search.parse_search_query(query)
# We might want to force the `event.type:transaction` to be in the query, as a validation step.
if force_transaction_event_type:
_check_event_type_transaction(tokens)
# As first step, we transform the search query by applying basic transformations.
tokens = _transform_search_query(tokens)
# As second step, if enabled, we remove elements from the query which are blacklisted.
if removed_blacklisted:
tokens = cleanup_search_query(_remove_blacklisted_search_filters(tokens))
return tokens
def cleanup_search_query(tokens: Sequence[QueryToken]) -> list[QueryToken]:
"""
Recreates a valid query from an original query that has had on demand search filters removed.
When removing filters from a query it is possible to create invalid queries.
For example removing the on demand filters from "transaction.duration:>=1s OR browser.version:1 AND environment:dev"
would result in "OR AND environment:dev" which is not a valid query this should be cleaned to "environment:dev.
"release:internal and browser.version:1 or os.name:android" => "release:internal or and os.name:android" which
would be cleaned to "release:internal or os.name:android"
"""
tokens = list(tokens)
# remove empty parens
removed_empty_parens: list[QueryToken] = []
for token in tokens:
if not isinstance(token, ParenExpression):
removed_empty_parens.append(token)
else:
children = cleanup_search_query(token.children)
if len(children) > 0:
removed_empty_parens.append(ParenExpression(children))
# remove AND and OR operators at the start of the query
while len(removed_empty_parens) > 0 and isinstance(removed_empty_parens[0], str):
removed_empty_parens.pop(0)
# remove AND and OR operators at the end of the query
while len(removed_empty_parens) > 0 and isinstance(removed_empty_parens[-1], str):
removed_empty_parens.pop()
# remove AND and OR operators that are next to each other
ret_val = []
previous_token: QueryToken | None = None
for token in removed_empty_parens:
# this loop takes care of removing consecutive AND/OR operators (keeping only one of them)
if isinstance(token, str) and isinstance(previous_token, str):
token = cast(QueryOp, token.upper())
# this handles two AND/OR operators next to each other, we must drop one of them
# if we have an AND do nothing (AND will be merged in the previous token see comment below)
# if we have an OR the resulting operator will be an OR
# AND OR => OR
# OR OR => OR
# OR AND => OR
# AND AND => AND
if token == "OR":
previous_token = "OR"
continue
elif previous_token is not None:
ret_val.append(previous_token)
previous_token = token
# take care of the last token (if any)
if previous_token is not None:
ret_val.append(previous_token)
return ret_val
def _parse_function(aggregate: str) -> tuple[str, list[str], str]:
"""
Parses an aggregate and returns its components.
This function is a slightly modified version of the `parse_function` method of the query builders.
"""
match = fields.is_function(aggregate)
if not match:
raise InvalidSearchQuery(f"Invalid characters in field {aggregate}")
function = match.group("function")
arguments = fields.parse_arguments(function, match.group("columns"))
alias = match.group("alias")
if alias is None:
alias = fields.get_function_alias_with_columns(function, arguments)
return function, arguments, alias
@dataclass(frozen=True)
| TagMapping |
python | getsentry__sentry | tests/sentry/preprod/test_models.py | {
"start": 243,
"end": 708
} | class ____(TestCase):
"""Base test class with common setup for PreprodArtifact model tests."""
def setUp(self):
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(
teams=[self.team], organization=self.organization, name="test_project"
)
@region_silo_test
| PreprodArtifactModelTestBase |
python | huggingface__transformers | tests/models/efficientloftr/test_modeling_efficientloftr.py | {
"start": 1368,
"end": 5183
} | class ____:
def __init__(
self,
parent,
batch_size=2,
image_width=6, # need to be a multiple of `stage_stride[0] * stage_stride[1]`
image_height=4, # need to be a multiple of `stage_stride[0] * stage_stride[1]`
stage_num_blocks: list[int] = [1, 1],
out_features: list[int] = [16, 16], # need to be >= 2 to make `config.fine_fusion_dims > 0`
stage_stride: list[int] = [2, 1],
q_aggregation_kernel_size: int = 1,
kv_aggregation_kernel_size: int = 1,
q_aggregation_stride: int = 1,
kv_aggregation_stride: int = 1,
num_attention_layers: int = 2,
num_attention_heads: int = 8,
hidden_size: int = 16,
coarse_matching_threshold: float = 0.0,
fine_kernel_size: int = 2,
coarse_matching_border_removal: int = 0,
):
self.parent = parent
self.batch_size = batch_size
self.image_width = image_width
self.image_height = image_height
self.stage_num_blocks = stage_num_blocks
self.out_features = out_features
self.stage_stride = stage_stride
self.q_aggregation_kernel_size = q_aggregation_kernel_size
self.kv_aggregation_kernel_size = kv_aggregation_kernel_size
self.q_aggregation_stride = q_aggregation_stride
self.kv_aggregation_stride = kv_aggregation_stride
self.num_attention_layers = num_attention_layers
self.num_attention_heads = num_attention_heads
self.hidden_size = hidden_size
self.coarse_matching_threshold = coarse_matching_threshold
self.coarse_matching_border_removal = coarse_matching_border_removal
self.fine_kernel_size = fine_kernel_size
def prepare_config_and_inputs(self):
# EfficientLoFTR expects a grayscale image as input
pixel_values = floats_tensor([self.batch_size, 2, 3, self.image_height, self.image_width])
config = self.get_config()
return config, pixel_values
def get_config(self):
return EfficientLoFTRConfig(
stage_num_blocks=self.stage_num_blocks,
out_features=self.out_features,
stage_stride=self.stage_stride,
q_aggregation_kernel_size=self.q_aggregation_kernel_size,
kv_aggregation_kernel_size=self.kv_aggregation_kernel_size,
q_aggregation_stride=self.q_aggregation_stride,
kv_aggregation_stride=self.kv_aggregation_stride,
num_attention_layers=self.num_attention_layers,
num_attention_heads=self.num_attention_heads,
hidden_size=self.hidden_size,
coarse_matching_threshold=self.coarse_matching_threshold,
coarse_matching_border_removal=self.coarse_matching_border_removal,
fine_kernel_size=self.fine_kernel_size,
)
def create_and_check_model(self, config, pixel_values):
model = EfficientLoFTRForKeypointMatching(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
maximum_num_matches = result.matches.shape[-1]
self.parent.assertEqual(
result.keypoints.shape,
(self.batch_size, 2, maximum_num_matches, 2),
)
self.parent.assertEqual(
result.matches.shape,
(self.batch_size, 2, maximum_num_matches),
)
self.parent.assertEqual(
result.matching_scores.shape,
(self.batch_size, 2, maximum_num_matches),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| EfficientLoFTRModelTester |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 3415,
"end": 3507
} | class ____(DynamicApiError):
""" 422: StatusUnprocessibleEntity """
| UnprocessibleEntityError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_write_borders.py | {
"start": 332,
"end": 1015
} | class ____(unittest.TestCase):
"""
Test the Styles _write_borders() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_borders(self):
"""Test the _write_borders() method"""
xf_format = Format()
xf_format.has_border = True
self.styles._set_style_properties([[xf_format], None, 0, 0, 1, 0, [], [], 0])
self.styles._write_borders()
exp = """<borders count="1"><border><left/><right/><top/><bottom/><diagonal/></border></borders>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteBorders |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ec2.py | {
"start": 3408,
"end": 5505
} | class ____(BaseEc2TestClass):
def test_init(self):
ec2_operator = EC2TerminateInstanceOperator(
task_id="test_terminate_instance",
instance_ids="test_image_id",
)
assert ec2_operator.task_id == "test_terminate_instance"
assert ec2_operator.max_attempts == 20
assert ec2_operator.poll_interval == 20
@mock_aws
def test_terminate_instance(self):
ec2_hook = EC2Hook()
create_instance = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
)
instance_id = create_instance.execute(None)
assert ec2_hook.get_instance_state(instance_id=instance_id[0]) == "running"
terminate_instance = EC2TerminateInstanceOperator(
task_id="test_terminate_instance", instance_ids=instance_id
)
terminate_instance.execute(None)
assert ec2_hook.get_instance_state(instance_id=instance_id[0]) == "terminated"
@mock_aws
def test_terminate_multiple_instances(self):
ec2_hook = EC2Hook()
create_instances = EC2CreateInstanceOperator(
task_id="test_create_multiple_instances",
image_id=self._get_image_id(hook=ec2_hook),
min_count=5,
max_count=5,
)
instance_ids = create_instances.execute(None)
assert len(instance_ids) == 5
for id in instance_ids:
assert ec2_hook.get_instance_state(instance_id=id) == "running"
terminate_instance = EC2TerminateInstanceOperator(
task_id="test_terminate_instance", instance_ids=instance_ids
)
terminate_instance.execute(None)
for id in instance_ids:
assert ec2_hook.get_instance_state(instance_id=id) == "terminated"
def test_template_fields(self):
ec2_operator = EC2TerminateInstanceOperator(
task_id="test_terminate_instance",
instance_ids="test_image_id",
)
validate_template_fields(ec2_operator)
| TestEC2TerminateInstanceOperator |
python | networkx__networkx | networkx/algorithms/tests/test_max_weight_clique.py | {
"start": 80,
"end": 6739
} | class ____:
def test_basic_cases(self):
def check_basic_case(graph_func, expected_weight, weight_accessor):
graph = graph_func()
clique, weight = nx.algorithms.max_weight_clique(graph, weight_accessor)
assert verify_clique(
graph, clique, weight, expected_weight, weight_accessor
)
for graph_func, (expected_weight, expected_size) in TEST_CASES.items():
check_basic_case(graph_func, expected_weight, "weight")
check_basic_case(graph_func, expected_size, None)
def test_key_error(self):
graph = two_node_graph()
with pytest.raises(KeyError):
nx.algorithms.max_weight_clique(graph, "nonexistent-key")
def test_error_on_non_integer_weight(self):
graph = two_node_graph()
graph.nodes[2]["weight"] = 1.5
with pytest.raises(ValueError):
nx.algorithms.max_weight_clique(graph)
def test_unaffected_by_self_loops(self):
graph = two_node_graph()
graph.add_edge(1, 1)
graph.add_edge(2, 2)
clique, weight = nx.algorithms.max_weight_clique(graph, "weight")
assert verify_clique(graph, clique, weight, 30, "weight")
graph = three_node_independent_set()
graph.add_edge(1, 1)
clique, weight = nx.algorithms.max_weight_clique(graph, "weight")
assert verify_clique(graph, clique, weight, 20, "weight")
def test_30_node_prob(self):
G = nx.Graph()
G.add_nodes_from(range(1, 31))
for i in range(1, 31):
G.nodes[i]["weight"] = i + 1
# fmt: off
G.add_edges_from(
[
(1, 12), (1, 13), (1, 15), (1, 16), (1, 18), (1, 19), (1, 20),
(1, 23), (1, 26), (1, 28), (1, 29), (1, 30), (2, 3), (2, 4),
(2, 5), (2, 8), (2, 9), (2, 10), (2, 14), (2, 17), (2, 18),
(2, 21), (2, 22), (2, 23), (2, 27), (3, 9), (3, 15), (3, 21),
(3, 22), (3, 23), (3, 24), (3, 27), (3, 28), (3, 29), (4, 5),
(4, 6), (4, 8), (4, 21), (4, 22), (4, 23), (4, 26), (4, 28),
(4, 30), (5, 6), (5, 8), (5, 9), (5, 13), (5, 14), (5, 15),
(5, 16), (5, 20), (5, 21), (5, 22), (5, 25), (5, 28), (5, 29),
(6, 7), (6, 8), (6, 13), (6, 17), (6, 18), (6, 19), (6, 24),
(6, 26), (6, 27), (6, 28), (6, 29), (7, 12), (7, 14), (7, 15),
(7, 16), (7, 17), (7, 20), (7, 25), (7, 27), (7, 29), (7, 30),
(8, 10), (8, 15), (8, 16), (8, 18), (8, 20), (8, 22), (8, 24),
(8, 26), (8, 27), (8, 28), (8, 30), (9, 11), (9, 12), (9, 13),
(9, 14), (9, 15), (9, 16), (9, 19), (9, 20), (9, 21), (9, 24),
(9, 30), (10, 12), (10, 15), (10, 18), (10, 19), (10, 20),
(10, 22), (10, 23), (10, 24), (10, 26), (10, 27), (10, 29),
(10, 30), (11, 13), (11, 15), (11, 16), (11, 17), (11, 18),
(11, 19), (11, 20), (11, 22), (11, 29), (11, 30), (12, 14),
(12, 17), (12, 18), (12, 19), (12, 20), (12, 21), (12, 23),
(12, 25), (12, 26), (12, 30), (13, 20), (13, 22), (13, 23),
(13, 24), (13, 30), (14, 16), (14, 20), (14, 21), (14, 22),
(14, 23), (14, 25), (14, 26), (14, 27), (14, 29), (14, 30),
(15, 17), (15, 18), (15, 20), (15, 21), (15, 26), (15, 27),
(15, 28), (16, 17), (16, 18), (16, 19), (16, 20), (16, 21),
(16, 29), (16, 30), (17, 18), (17, 21), (17, 22), (17, 25),
(17, 27), (17, 28), (17, 30), (18, 19), (18, 20), (18, 21),
(18, 22), (18, 23), (18, 24), (19, 20), (19, 22), (19, 23),
(19, 24), (19, 25), (19, 27), (19, 30), (20, 21), (20, 23),
(20, 24), (20, 26), (20, 28), (20, 29), (21, 23), (21, 26),
(21, 27), (21, 29), (22, 24), (22, 25), (22, 26), (22, 29),
(23, 25), (23, 30), (24, 25), (24, 26), (25, 27), (25, 29),
(26, 27), (26, 28), (26, 30), (28, 29), (29, 30),
]
)
# fmt: on
clique, weight = nx.algorithms.max_weight_clique(G)
assert verify_clique(G, clique, weight, 111, "weight")
# ############################ Utility functions ############################
def verify_clique(
graph, clique, reported_clique_weight, expected_clique_weight, weight_accessor
):
for node1 in clique:
for node2 in clique:
if node1 == node2:
continue
if not graph.has_edge(node1, node2):
return False
if weight_accessor is None:
clique_weight = len(clique)
else:
clique_weight = sum(graph.nodes[v]["weight"] for v in clique)
if clique_weight != expected_clique_weight:
return False
if clique_weight != reported_clique_weight:
return False
return True
# ############################ Graph Generation ############################
def empty_graph():
return nx.Graph()
def one_node_graph():
graph = nx.Graph()
graph.add_nodes_from([1])
graph.nodes[1]["weight"] = 10
return graph
def two_node_graph():
graph = nx.Graph()
graph.add_nodes_from([1, 2])
graph.add_edges_from([(1, 2)])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
return graph
def three_node_clique():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3])
graph.add_edges_from([(1, 2), (1, 3), (2, 3)])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
graph.nodes[3]["weight"] = 5
return graph
def three_node_independent_set():
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
graph.nodes[3]["weight"] = 5
return graph
def disconnected():
graph = nx.Graph()
graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)])
graph.nodes[1]["weight"] = 10
graph.nodes[2]["weight"] = 20
graph.nodes[3]["weight"] = 5
graph.nodes[4]["weight"] = 100
graph.nodes[5]["weight"] = 200
graph.nodes[6]["weight"] = 50
return graph
# --------------------------------------------------------------------------
# Basic tests for all strategies
# For each basic graph function, specify expected weight of max weight clique
# and expected size of maximum clique
TEST_CASES = {
empty_graph: (0, 0),
one_node_graph: (10, 1),
two_node_graph: (30, 2),
three_node_clique: (35, 3),
three_node_independent_set: (20, 1),
disconnected: (300, 2),
}
| TestMaximumWeightClique |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-snowflake-cortex/destination_snowflake_cortex/destination.py | {
"start": 727,
"end": 3568
} | class ____(Destination):
sql_processor: cortex_processor.SnowflakeCortexSqlProcessor
def _init_sql_processor(
self, config: ConfigModel, configured_catalog: Optional[ConfiguredAirbyteCatalog] = None
):
self.sql_processor = cortex_processor.SnowflakeCortexSqlProcessor(
sql_config=cortex_processor.SnowflakeCortexConfig(
host=config.indexing.host,
role=config.indexing.role,
warehouse=config.indexing.warehouse,
database=config.indexing.database,
schema_name=config.indexing.default_schema,
username=config.indexing.username,
password=SecretString(config.indexing.credentials.password),
),
splitter_config=config.processing,
embedder_config=config.embedding, # type: ignore [arg-type] # No common base class
catalog_provider=CatalogProvider(configured_catalog),
temp_dir=Path(tempfile.mkdtemp()),
temp_file_cleanup=True,
)
def write(
self,
config: Mapping[str, Any],
configured_catalog: ConfiguredAirbyteCatalog,
input_messages: Iterable[AirbyteMessage],
) -> Iterable[AirbyteMessage]:
parsed_config = ConfigModel.parse_obj(config)
self._init_sql_processor(config=parsed_config, configured_catalog=configured_catalog)
yield from self.sql_processor.process_airbyte_messages_as_generator(
messages=input_messages,
write_strategy=WriteStrategy.AUTO,
# TODO: Ensure this setting is covered, then delete the commented-out line:
# omit_raw_text=parsed_config.omit_raw_text,
)
def check(self, logger: Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
_ = logger # Unused
try:
parsed_config = ConfigModel.parse_obj(config)
self._init_sql_processor(config=parsed_config)
self.sql_processor.sql_config.connect()
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(
status=Status.FAILED, message=f"An exception occurred: {repr(e)}"
)
def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification:
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.com/integrations/destinations/snowflake-cortex",
supportsIncremental=True,
supported_destination_sync_modes=[
DestinationSyncMode.overwrite,
DestinationSyncMode.append,
DestinationSyncMode.append_dedup,
],
connectionSpecification=ConfigModel.schema(), # type: ignore[attr-defined]
)
| DestinationSnowflakeCortex |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 210263,
"end": 211435
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self, name: str, client_id: str, client_secret: str, refresh_token: str, start_date: str
):
"""Airbyte Source for Salesloft.
Documentation can be found at https://docs.airbyte.com/integrations/sources/salesloft
Args:
name (str): The name of the destination.
client_id (str): The Client ID of your Salesloft developer application.
client_secret (str): The Client Secret of your Salesloft developer application.
refresh_token (str): The token for obtaining a new access token.
start_date (str): The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.
"""
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Salesloft", name)
| SalesloftSource |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 42461,
"end": 65880
} | class ____:
"""Collection container for groupby aggregations
The purpose of this class is to expose an API similar
to Pandas' `Groupby` for dask-expr collections.
See Also
--------
SingleAggregation
"""
def __init__(
self,
obj,
by,
group_keys=True,
sort=None,
observed=None,
dropna=None,
slice=None,
):
if isinstance(by, (tuple, list)):
by = [_clean_by_expr(obj, x) for x in by]
else:
by = _clean_by_expr(obj, by)
by_ = by if isinstance(by, (tuple, list)) else [by]
if any(isinstance(key, pd.Grouper) for key in by_):
raise NotImplementedError("pd.Grouper is currently not supported by Dask.")
self._slice = slice
# Check if we can project columns
projection = None
if (
np.isscalar(slice)
or isinstance(slice, (str, list, tuple))
or (
(is_index_like(slice) or is_series_like(slice))
and not is_dask_collection(slice)
)
):
projection = set(by_).union(
{slice} if (np.isscalar(slice) or isinstance(slice, str)) else slice
)
projection = [c for c in obj.columns if c in projection]
self.obj = obj[projection] if projection is not None else obj
self.sort = sort
self.observed = (
observed if observed is not None else False if not PANDAS_GE_300 else True
)
self.dropna = dropna
self.group_keys = group_keys
self.by = (
[by] if np.isscalar(by) or isinstance(by, (Expr, Callable)) else list(by)
)
# surface pandas errors
self._meta = self.obj._meta.groupby(
by,
group_keys=group_keys,
sort=sort,
**_as_dict("observed", observed),
**_as_dict("dropna", dropna),
)
if slice is not None:
if isinstance(slice, tuple):
slice = list(slice)
self._meta = self._meta[slice]
def _numeric_only_kwargs(self, numeric_only):
kwargs = {"numeric_only": numeric_only}
return {"chunk_kwargs": kwargs.copy(), "aggregate_kwargs": kwargs.copy()}
def _single_agg(
self,
expr_cls,
split_every=None,
split_out=None,
chunk_kwargs=None,
aggregate_kwargs=None,
shuffle_method=None,
):
if split_every is None:
split_every = 8
return new_collection(
expr_cls(
self.obj.expr,
self.observed,
self.dropna,
chunk_kwargs,
aggregate_kwargs,
self._slice,
split_every,
split_out,
self.sort,
get_specified_shuffle(shuffle_method),
*self.by,
)
)
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e) from e
def __dir__(self):
return sorted(
set(
dir(type(self))
+ list(self.__dict__)
+ list(filter(M.isidentifier, self.obj.columns))
)
)
def compute(self, **kwargs):
raise NotImplementedError(
"DataFrameGroupBy does not allow compute method."
"Please chain it with an aggregation method (like ``.mean()``) or get a "
"specific group using ``.get_group()`` before calling ``compute()``"
)
def __getitem__(self, key):
if is_scalar(key):
return SeriesGroupBy(
self.obj,
by=self.by,
group_keys=self.group_keys,
slice=key,
sort=self.sort,
dropna=self.dropna,
observed=self.observed,
)
g = GroupBy(
self.obj,
by=self.by,
slice=key,
sort=self.sort,
dropna=self.dropna,
observed=self.observed,
group_keys=self.group_keys,
)
return g
@derived_from(
pd.core.groupby.GroupBy,
inconsistencies="If the group is not present, Dask will return an empty Series/DataFrame.",
)
def get_group(self, key):
return new_collection(GetGroup(self.obj.expr, key, self._slice, *self.by))
@derived_from(pd.core.groupby.GroupBy)
def count(self, **kwargs):
return self._single_agg(Count, **kwargs)
@derived_from(pd.core.groupby.GroupBy)
def sum(self, numeric_only=False, min_count=None, **kwargs):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
result = self._single_agg(Sum, **kwargs, **numeric_kwargs)
if min_count:
return result.where(self.count() >= min_count, other=np.nan)
return result
@derived_from(pd.core.groupby.GroupBy)
def prod(self, numeric_only=False, min_count=None, **kwargs):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
result = self._single_agg(Prod, **kwargs, **numeric_kwargs)
if min_count:
return result.where(self.count() >= min_count, other=np.nan)
return result
def _cum_agg(self, cls, numeric_only=None):
return new_collection(
cls(
self.obj.expr,
self.dropna,
self._slice,
numeric_only,
*self.by,
)
)
@derived_from(pd.core.groupby.GroupBy)
def cumsum(self, numeric_only=False):
return self._cum_agg(GroupByCumsum, numeric_only)
@derived_from(pd.core.groupby.GroupBy)
def cumprod(self, numeric_only=False):
return self._cum_agg(GroupByCumprod, numeric_only)
@derived_from(pd.core.groupby.GroupBy)
def cumcount(self):
return self._cum_agg(GroupByCumcount)
def _all_numeric(self):
"""Are all columns that we're not grouping on numeric?"""
numerics = self.obj._meta._get_numeric_data()
# This computes a groupby but only on the empty meta
post_group_columns = self._meta.count().columns
return len(set(post_group_columns) - set(numerics.columns)) == 0
@derived_from(pd.core.groupby.GroupBy)
def mean(self, numeric_only=False, split_out=None, **kwargs):
if not numeric_only and not self._all_numeric():
raise NotImplementedError(
"'numeric_only=False' is not implemented in Dask."
)
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
result = self._single_agg(Mean, split_out=split_out, **kwargs, **numeric_kwargs)
return self._postprocess_series_squeeze(result)
def _postprocess_series_squeeze(self, result):
if (
isinstance(self.obj, Series)
or is_scalar(self._slice)
and self._slice is not None
):
if len(result.columns) < 1:
raise NotImplementedError(
"Cannot call `SeriesGroupBy.var` or `SeriesGroupBy.mean` on the key "
"column. Please use `aggregate` if you really need to do this."
)
result = result[result.columns[0]]
return result
@derived_from(pd.core.groupby.GroupBy)
def min(self, numeric_only=False, **kwargs):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
return self._single_agg(Min, **kwargs, **numeric_kwargs)
@derived_from(pd.core.groupby.GroupBy)
def max(self, numeric_only=False, **kwargs):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
return self._single_agg(Max, **kwargs, **numeric_kwargs)
@derived_from(pd.core.groupby.GroupBy)
def first(self, numeric_only=False, sort=None, **kwargs):
if sort:
raise NotImplementedError()
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
return self._single_agg(First, **kwargs, **numeric_kwargs)
@derived_from(pd.DataFrame)
def cov(
self,
ddof=1,
split_every=None,
split_out=None,
numeric_only=False,
shuffle_method=None,
):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
return self._single_agg(
Cov,
split_every,
split_out,
chunk_kwargs=numeric_kwargs["chunk_kwargs"],
aggregate_kwargs={"ddof": ddof},
)
@derived_from(pd.DataFrame)
def corr(
self, split_every=None, split_out=None, numeric_only=False, shuffle_method=None
):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
return self._single_agg(
Corr,
split_every,
split_out,
chunk_kwargs=numeric_kwargs["chunk_kwargs"],
aggregate_kwargs={"ddof": 1},
)
@derived_from(pd.core.groupby.GroupBy)
def last(self, numeric_only=False, sort=None, **kwargs):
if sort:
raise NotImplementedError()
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
return self._single_agg(Last, **kwargs, **numeric_kwargs)
@derived_from(pd.core.groupby.GroupBy)
def ffill(self, limit=None, shuffle_method=None):
return self._transform_like_op(
GroupByFFill, None, limit=limit, shuffle_method=shuffle_method
)
@derived_from(pd.core.groupby.GroupBy)
def bfill(self, limit=None, shuffle_method=None):
return self._transform_like_op(
GroupByBFill, None, limit=limit, shuffle_method=shuffle_method
)
@derived_from(pd.core.groupby.GroupBy)
def size(self, **kwargs):
return self._single_agg(Size, **kwargs)
@derived_from(pd.DataFrame)
def idxmin(
self,
split_every=None,
split_out=None,
skipna=True,
numeric_only=False,
shuffle_method=None,
):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
numeric_kwargs["chunk_kwargs"]["skipna"] = skipna
return self._single_agg(
IdxMin,
split_every=split_every,
split_out=split_out,
shuffle_method=shuffle_method,
**numeric_kwargs,
)
@derived_from(pd.DataFrame)
def idxmax(
self,
split_every=None,
split_out=None,
skipna=True,
numeric_only=False,
shuffle_method=None,
):
numeric_kwargs = self._numeric_only_kwargs(numeric_only)
numeric_kwargs["chunk_kwargs"]["skipna"] = skipna
return self._single_agg(
IdxMax,
split_every=split_every,
split_out=split_out,
shuffle_method=shuffle_method,
**numeric_kwargs,
)
@derived_from(pd.core.groupby.SeriesGroupBy)
def head(self, n=5, split_every=None, split_out=None):
chunk_kwargs = {"n": n}
aggregate_kwargs = {
"n": n,
"index_levels": len(self.by) if not isinstance(self.by, Expr) else 1,
}
return self._single_agg(
Head,
split_every=split_every,
split_out=split_out,
chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
)
@derived_from(pd.core.groupby.SeriesGroupBy)
def tail(self, n=5, split_every=None, split_out=None):
chunk_kwargs = {"n": n}
aggregate_kwargs = {
"n": n,
"index_levels": len(self.by) if not isinstance(self.by, Expr) else 1,
}
return self._single_agg(
Tail,
split_every=split_every,
split_out=split_out,
chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
)
@derived_from(pd.core.groupby.GroupBy)
def var(
self,
ddof=1,
split_every=None,
split_out=None,
numeric_only=False,
shuffle_method=None,
):
if not numeric_only and not self._all_numeric():
raise NotImplementedError(
"'numeric_only=False' is not implemented in Dask."
)
result = new_collection(
Var(
self.obj.expr,
ddof,
numeric_only,
split_out,
split_every,
self.sort,
self.dropna,
self.observed,
shuffle_method,
*self.by,
)
)
return self._postprocess_series_squeeze(result)
@derived_from(pd.core.groupby.GroupBy)
def std(
self,
ddof=1,
split_every=None,
split_out=None,
numeric_only=False,
shuffle_method=None,
):
if not numeric_only and not self._all_numeric():
raise NotImplementedError(
"'numeric_only=False' is not implemented in Dask."
)
result = new_collection(
Std(
self.obj.expr,
ddof,
numeric_only,
split_out,
split_every,
self.sort,
self.dropna,
self.observed,
shuffle_method,
*self.by,
)
)
return self._postprocess_series_squeeze(result)
@_aggregate_docstring(based_on="pd.core.groupby.DataFrameGroupBy.agg")
def aggregate(
self, arg=None, split_every=8, split_out=None, shuffle_method=None, **kwargs
):
relabeling, order, columns = None, None, None
if arg is None:
if not isinstance(self, SeriesGroupBy):
relabeling, arg, columns, order = reconstruct_func(arg, **kwargs)
elif isinstance(self, SeriesGroupBy):
columns, arg = validate_func_kwargs(kwargs)
relabeling = True
if arg == "size":
return self.size()
result = new_collection(
GroupbyAggregation(
self.obj.expr,
arg,
self.observed,
self.dropna,
split_every,
split_out,
self.sort,
shuffle_method,
self._slice,
*self.by,
)
)
if relabeling and result is not None:
if order is not None:
result = result.iloc[:, order]
result.columns = columns
return result
def agg(self, *args, **kwargs):
return self.aggregate(*args, **kwargs)
def _warn_if_no_meta(self, meta, method="apply"):
if meta is no_default:
msg = f"""`meta` is not specified, inferred from partial data.
Please provide `meta` if the result is unexpected.
Before: .{method}(func)
After: .{method}(func, meta={{'x': 'f8', 'y': 'f8'}}) for dataframe result
or: .{method}(func, meta=('x', 'f8')) for series result
"""
warnings.warn(msg, stacklevel=3)
@insert_meta_param_description(pad=12)
def apply(self, func, *args, meta=no_default, shuffle_method=None, **kwargs):
"""Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
2. Dask's GroupBy.apply is not appropriate for aggregations. For custom
aggregations, use :class:`dask.dataframe.groupby.Aggregation`.
.. warning::
Pandas' groupby-apply can be used to to apply arbitrary functions,
including aggregations that result in one row per group. Dask's
groupby-apply will apply ``func`` once on each group, doing a shuffle
if needed, such that each group is contained in one partition.
When ``func`` is a reduction, e.g., you'll end up with one row
per group. To apply a custom aggregation with Dask,
use :class:`dask.dataframe.groupby.Aggregation`.
Parameters
----------
func: function
Function to apply
args, kwargs : Scalar, Delayed or object
Arguments and keywords to pass to the function.
$META
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
self._warn_if_no_meta(meta)
return new_collection(
GroupByApply(
self.obj.expr,
self.observed,
self.dropna,
self._slice,
self.group_keys,
func,
meta,
args,
kwargs,
get_specified_shuffle(shuffle_method),
*self.by,
)
)
def _transform_like_op(
self, expr_cls, func, meta=no_default, shuffle_method=None, *args, **kwargs
):
return new_collection(
expr_cls(
self.obj.expr,
self.observed,
self.dropna,
self._slice,
self.group_keys,
func,
meta,
args,
kwargs,
get_specified_shuffle(shuffle_method),
*self.by,
)
)
@insert_meta_param_description(pad=12)
def transform(self, func, meta=no_default, shuffle_method=None, *args, **kwargs):
"""Parallel version of pandas GroupBy.transform
This mimics the pandas version except for the following:
1. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
2. Dask's GroupBy.transform is not appropriate for aggregations. For custom
aggregations, use :class:`dask.dataframe.groupby.Aggregation`.
.. warning::
Pandas' groupby-transform can be used to apply arbitrary functions,
including aggregations that result in one row per group. Dask's
groupby-transform will apply ``func`` once on each group, doing a shuffle
if needed, such that each group is contained in one partition.
When ``func`` is a reduction, e.g., you'll end up with one row
per group. To apply a custom aggregation with Dask,
use :class:`dask.dataframe.groupby.Aggregation`.
Parameters
----------
func: function
Function to apply
args, kwargs : Scalar, Delayed or object
Arguments and keywords to pass to the function.
$META
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
self._warn_if_no_meta(meta, method="transform")
return self._transform_like_op(
GroupByTransform, func, meta, shuffle_method, *args, **kwargs
)
@insert_meta_param_description(pad=12)
def shift(self, periods=1, meta=no_default, shuffle_method=None, *args, **kwargs):
"""Parallel version of pandas GroupBy.shift
This mimics the pandas version except for the following:
If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
Parameters
----------
periods : Delayed, Scalar or int, default 1
Number of periods to shift.
freq : Delayed, Scalar or str, optional
Frequency string.
fill_value : Scalar, Delayed or object, optional
The scalar value to use for newly introduced missing values.
$META
Returns
-------
shifted : Series or DataFrame shifted within each group.
Examples
--------
>>> import dask
>>> ddf = dask.datasets.timeseries(freq="1h")
>>> result = ddf.groupby("name").shift(1, meta={"id": int, "x": float, "y": float})
"""
if "axis" in kwargs:
raise TypeError("axis is not supported in shift.")
self._warn_if_no_meta(meta, method="shift")
kwargs = {"periods": periods, **kwargs}
return self._transform_like_op(
GroupByShift, None, meta, shuffle_method, *args, **kwargs
)
@derived_from(pd.core.groupby.GroupBy)
def median(
self, split_every=None, split_out=True, shuffle_method=None, numeric_only=False
):
result = new_collection(
Median(
self.obj.expr,
self.observed,
self.dropna,
self._slice,
self.group_keys,
None,
no_default,
(),
{"numeric_only": numeric_only},
get_specified_shuffle(shuffle_method),
split_every,
*self.by,
)
)
if split_out is not True:
result = result.repartition(npartitions=split_out)
return result
def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):
"""Provides rolling transformations.
.. note::
Since MultiIndexes are not well supported in Dask, this method returns a
dataframe with the same index as the original data. The groupby column is
not added as the first level of the index like pandas does.
This method works differently from other groupby methods. It does a groupby
on each partition (plus some overlap). This means that the output has the
same shape and number of partitions as the original.
Parameters
----------
window : str, offset
Size of the moving window. This is the number of observations used
for calculating the statistic. Data must have a ``DatetimeIndex``
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Examples
--------
>>> import dask
>>> ddf = dask.datasets.timeseries(freq="1h")
>>> result = ddf.groupby("name").x.rolling('1D').max()
"""
from dask.dataframe.dask_expr._rolling import Rolling
return Rolling(
self.obj,
window,
min_periods=min_periods,
center=center,
win_type=win_type,
groupby_kwargs={
"by": self.by,
"sort": self.sort,
"observed": self.observed,
"dropna": self.dropna,
"group_keys": self.group_keys,
},
groupby_slice=self._slice,
)
| GroupBy |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 6420,
"end": 6517
} | class ____(HTTPWarning):
"""Warned when performing security reducing actions"""
| SecurityWarning |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py | {
"start": 42311,
"end": 49345
} | class ____(AwsBaseOperator[EmrServerlessHook]):
"""
Operator to create Serverless EMR Application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrServerlessCreateApplicationOperator`
:param release_label: The EMR release version associated with the application.
:param job_type: The type of application you want to start, such as Spark or Hive.
:param wait_for_completion: If true, wait for the Application to start before returning. Defaults to True.
If set to False, ``waiter_max_attempts`` and ``waiter_delay`` will only be applied when
waiting for the application to be in the ``CREATED`` state.
:param client_request_token: The client idempotency token of the application to create.
Its value must be unique for each request.
:param config: Optional dictionary for arbitrary parameters to the boto API create_application call.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param waiter_max_attempts: Number of times the waiter should poll the application to check the state.
Defaults to 25 if not set.
:param waiter_delay: Number of seconds between polling the state of the application.
Defaults to 60 seconds if not set.
:param deferrable: If True, the operator will wait asynchronously for application to be created.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
"""
aws_hook_class = EmrServerlessHook
def __init__(
self,
release_label: str,
job_type: str,
client_request_token: str = "",
config: dict | None = None,
wait_for_completion: bool = True,
waiter_max_attempts: int | ArgNotSet = NOTSET,
waiter_delay: int | ArgNotSet = NOTSET,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay
waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts
self.release_label = release_label
self.job_type = job_type
self.wait_for_completion = wait_for_completion
self.kwargs = kwargs
self.config = config or {}
self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type]
self.waiter_delay = int(waiter_delay) # type: ignore[arg-type]
self.deferrable = deferrable
super().__init__(**kwargs)
self.client_request_token = client_request_token or str(uuid4())
def execute(self, context: Context) -> str | None:
response = self.hook.conn.create_application(
clientToken=self.client_request_token,
releaseLabel=self.release_label,
type=self.job_type,
**self.config,
)
application_id = response["applicationId"]
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Application Creation failed: {response}")
self.log.info("EMR serverless application created: %s", application_id)
if self.deferrable:
self.defer(
trigger=EmrServerlessCreateApplicationTrigger(
application_id=application_id,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
method_name="start_application_deferred",
)
waiter = self.hook.get_waiter("serverless_app_created")
wait(
waiter=waiter,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
args={"applicationId": application_id},
failure_message="Serverless Application creation failed",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
self.log.info("Starting application %s", application_id)
self.hook.conn.start_application(applicationId=application_id)
if self.wait_for_completion:
waiter = self.hook.get_waiter("serverless_app_started")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": application_id},
failure_message="Serverless Application failed to start",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
return application_id
def start_application_deferred(self, context: Context, event: dict[str, Any] | None = None) -> None:
if event is None:
self.log.error("Trigger error: event is None")
raise AirflowException("Trigger error: event is None")
if event["status"] != "success":
raise AirflowException(f"Application {event['application_id']} failed to create")
self.log.info("Starting application %s", event["application_id"])
self.hook.conn.start_application(applicationId=event["application_id"])
self.defer(
trigger=EmrServerlessStartApplicationTrigger(
application_id=event["application_id"],
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Trigger error: Application failed to start, event is {validated_event}")
self.log.info("Application %s started", validated_event["application_id"])
return validated_event["application_id"]
| EmrServerlessCreateApplicationOperator |
python | huggingface__transformers | tests/quantization/fp_quant_integration/test_fp_quant.py | {
"start": 6090,
"end": 6281
} | class ____(FPQuantBaseTest):
@classmethod
def getQuantizationConfig(cls):
return FPQuantConfig(forward_dtype="mxfp4", pseudoquantization=False)
@require_qutlass
| FPQuantMXFP4Test |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 34381,
"end": 43502
} | class ____:
@pytest.fixture(autouse=True)
def setUp(self) -> None:
self.data = np.linspace(0, 1, num=100)
def test_robust(self) -> None:
cmap_params = _determine_cmap_params(self.data, robust=True)
assert cmap_params["vmin"] == np.percentile(self.data, 2)
assert cmap_params["vmax"] == np.percentile(self.data, 98)
assert cmap_params["cmap"] == "viridis"
assert cmap_params["extend"] == "both"
assert cmap_params["levels"] is None
assert cmap_params["norm"] is None
def test_center(self) -> None:
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params["vmax"] - 0.5 == 0.5 - cmap_params["vmin"]
assert cmap_params["cmap"] == "RdBu_r"
assert cmap_params["extend"] == "neither"
assert cmap_params["levels"] is None
assert cmap_params["norm"] is None
def test_cmap_sequential_option(self) -> None:
with xr.set_options(cmap_sequential="magma"):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params["cmap"] == "magma"
def test_cmap_sequential_explicit_option(self) -> None:
with xr.set_options(cmap_sequential=mpl.colormaps["magma"]):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params["cmap"] == mpl.colormaps["magma"]
def test_cmap_divergent_option(self) -> None:
with xr.set_options(cmap_divergent="magma"):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params["cmap"] == "magma"
def test_nan_inf_are_ignored(self) -> None:
cmap_params1 = _determine_cmap_params(self.data)
data = self.data
data[50:55] = np.nan
data[56:60] = np.inf
cmap_params2 = _determine_cmap_params(data)
assert cmap_params1["vmin"] == cmap_params2["vmin"]
assert cmap_params1["vmax"] == cmap_params2["vmax"]
@pytest.mark.slow
def test_integer_levels(self) -> None:
data = self.data + 1
# default is to cover full data range but with no guarantee on Nlevels
for level in np.arange(2, 10, dtype=int):
cmap_params = _determine_cmap_params(data, levels=level)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == cmap_params["levels"][0]
assert cmap_params["norm"].vmax == cmap_params["levels"][-1]
assert cmap_params["extend"] == "neither"
# with min max we are more strict
cmap_params = _determine_cmap_params(
data, levels=5, vmin=0, vmax=5, cmap="Blues"
)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == 0
assert cmap_params["norm"].vmax == 5
assert cmap_params["norm"].vmin == cmap_params["levels"][0]
assert cmap_params["norm"].vmax == cmap_params["levels"][-1]
assert cmap_params["cmap"].name == "Blues"
assert cmap_params["extend"] == "neither"
assert cmap_params["cmap"].N == 4
assert cmap_params["norm"].N == 5
cmap_params = _determine_cmap_params(data, levels=5, vmin=0.5, vmax=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "max"
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "min"
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.3, vmax=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "both"
def test_list_levels(self) -> None:
data = self.data + 1
orig_levels = [0, 1, 2, 3, 4, 5]
# vmin and vmax should be ignored if levels are explicitly provided
cmap_params = _determine_cmap_params(data, levels=orig_levels, vmin=0, vmax=3)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == 0
assert cmap_params["norm"].vmax == 5
assert cmap_params["cmap"].N == 5
assert cmap_params["norm"].N == 6
for wrap_levels in cast(
list[Callable[[Any], dict[Any, Any]]], [list, np.array, pd.Index, DataArray]
):
cmap_params = _determine_cmap_params(data, levels=wrap_levels(orig_levels))
assert_array_equal(cmap_params["levels"], orig_levels)
def test_divergentcontrol(self) -> None:
neg = self.data - 0.1
pos = self.data
# Default with positive data will be a normal cmap
cmap_params = _determine_cmap_params(pos)
assert cmap_params["vmin"] == 0
assert cmap_params["vmax"] == 1
assert cmap_params["cmap"] == "viridis"
# Default with negative data will be a divergent cmap
cmap_params = _determine_cmap_params(neg)
assert cmap_params["vmin"] == -0.9
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "RdBu_r"
# Setting vmin or vmax should prevent this only if center is false
cmap_params = _determine_cmap_params(neg, vmin=-0.1, center=False)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "viridis"
cmap_params = _determine_cmap_params(neg, vmax=0.5, center=False)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.5
assert cmap_params["cmap"] == "viridis"
# Setting center=False too
cmap_params = _determine_cmap_params(neg, center=False)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "viridis"
# However, I should still be able to set center and have a div cmap
cmap_params = _determine_cmap_params(neg, center=0)
assert cmap_params["vmin"] == -0.9
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "RdBu_r"
# Setting vmin or vmax alone will force symmetric bounds around center
cmap_params = _determine_cmap_params(neg, vmin=-0.1)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.1
assert cmap_params["cmap"] == "RdBu_r"
cmap_params = _determine_cmap_params(neg, vmax=0.5)
assert cmap_params["vmin"] == -0.5
assert cmap_params["vmax"] == 0.5
assert cmap_params["cmap"] == "RdBu_r"
cmap_params = _determine_cmap_params(neg, vmax=0.6, center=0.1)
assert cmap_params["vmin"] == -0.4
assert cmap_params["vmax"] == 0.6
assert cmap_params["cmap"] == "RdBu_r"
# But this is only true if vmin or vmax are negative
cmap_params = _determine_cmap_params(pos, vmin=-0.1)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.1
assert cmap_params["cmap"] == "RdBu_r"
cmap_params = _determine_cmap_params(pos, vmin=0.1)
assert cmap_params["vmin"] == 0.1
assert cmap_params["vmax"] == 1
assert cmap_params["cmap"] == "viridis"
cmap_params = _determine_cmap_params(pos, vmax=0.5)
assert cmap_params["vmin"] == 0
assert cmap_params["vmax"] == 0.5
assert cmap_params["cmap"] == "viridis"
# If both vmin and vmax are provided, output is non-divergent
cmap_params = _determine_cmap_params(neg, vmin=-0.2, vmax=0.6)
assert cmap_params["vmin"] == -0.2
assert cmap_params["vmax"] == 0.6
assert cmap_params["cmap"] == "viridis"
# regression test for GH3524
# infer diverging colormap from divergent levels
cmap_params = _determine_cmap_params(pos, levels=[-0.1, 0, 1])
# specifying levels makes cmap a Colormap object
assert cmap_params["cmap"].name == "RdBu_r"
def test_norm_sets_vmin_vmax(self) -> None:
vmin = self.data.min()
vmax = self.data.max()
for norm, extend, levels in zip(
[
mpl.colors.Normalize(),
mpl.colors.Normalize(),
mpl.colors.Normalize(vmin + 0.1, vmax - 0.1),
mpl.colors.Normalize(None, vmax - 0.1),
mpl.colors.Normalize(vmin + 0.1, None),
],
["neither", "neither", "both", "max", "min"],
[7, None, None, None, None],
strict=True,
):
test_min = vmin if norm.vmin is None else norm.vmin
test_max = vmax if norm.vmax is None else norm.vmax
cmap_params = _determine_cmap_params(self.data, norm=norm, levels=levels)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == test_min
assert cmap_params["norm"].vmax == test_max
assert cmap_params["extend"] == extend
assert cmap_params["norm"] == norm
@requires_matplotlib
| TestDetermineCmapParams |
python | dask__dask | dask/utils.py | {
"start": 56213,
"end": 61118
} | class ____:
"""Hash and compare a wrapped object by identity instead of value"""
def __init__(self, wrapped):
self.wrapped = wrapped
def __eq__(self, other):
if not isinstance(other, _HashIdWrapper):
return NotImplemented
return self.wrapped is other.wrapped
def __ne__(self, other):
if not isinstance(other, _HashIdWrapper):
return NotImplemented
return self.wrapped is not other.wrapped
def __hash__(self):
return id(self.wrapped)
@functools.lru_cache
def _cumsum(seq, initial_zero):
if isinstance(seq, _HashIdWrapper):
seq = seq.wrapped
if initial_zero:
return tuple(toolz.accumulate(add, seq, 0))
else:
return tuple(toolz.accumulate(add, seq))
@functools.lru_cache
def _max(seq):
if isinstance(seq, _HashIdWrapper):
seq = seq.wrapped
return max(seq)
def cached_max(seq):
"""Compute max with caching.
Caching is by the identity of `seq` rather than the value. It is thus
important that `seq` is a tuple of immutable objects, and this function
is intended for use where `seq` is a value that will persist (generally
block sizes).
Parameters
----------
seq : tuple
Values to reduce
Returns
-------
tuple
"""
assert isinstance(seq, tuple)
# Look up by identity first, to avoid a linear-time __hash__
# if we've seen this tuple object before.
result = _max(_HashIdWrapper(seq))
return result
def cached_cumsum(seq, initial_zero=False):
"""Compute :meth:`toolz.accumulate` with caching.
Caching is by the identify of `seq` rather than the value. It is thus
important that `seq` is a tuple of immutable objects, and this function
is intended for use where `seq` is a value that will persist (generally
block sizes).
Parameters
----------
seq : tuple
Values to cumulatively sum.
initial_zero : bool, optional
If true, the return value is prefixed with a zero.
Returns
-------
tuple
"""
if isinstance(seq, tuple):
# Look up by identity first, to avoid a linear-time __hash__
# if we've seen this tuple object before.
result = _cumsum(_HashIdWrapper(seq), initial_zero)
else:
# Construct a temporary tuple, and look up by value.
result = _cumsum(tuple(seq), initial_zero)
return result
def show_versions() -> None:
"""Provide version information for bug reports."""
from json import dumps
from platform import uname
from sys import stdout, version_info
from dask._compatibility import importlib_metadata
try:
from distributed import __version__ as distributed_version
except ImportError:
distributed_version = None
from dask import __version__ as dask_version
deps = [
"numpy",
"pandas",
"cloudpickle",
"fsspec",
"bokeh",
"pyarrow",
"zarr",
]
result: dict[str, str | None] = {
# note: only major, minor, micro are extracted
"Python": ".".join([str(i) for i in version_info[:3]]),
"Platform": uname().system,
"dask": dask_version,
"distributed": distributed_version,
}
for modname in deps:
try:
result[modname] = importlib_metadata.version(modname)
except importlib_metadata.PackageNotFoundError:
result[modname] = None
stdout.writelines(dumps(result, indent=2))
def maybe_pluralize(count, noun, plural_form=None):
"""Pluralize a count-noun string pattern when necessary"""
if count == 1:
return f"{count} {noun}"
else:
return f"{count} {plural_form or noun + 's'}"
def is_namedtuple_instance(obj: Any) -> bool:
"""Returns True if obj is an instance of a namedtuple.
Note: This function checks for the existence of the methods and
attributes that make up the namedtuple API, so it will return True
IFF obj's type implements that API.
"""
return (
isinstance(obj, tuple)
and hasattr(obj, "_make")
and hasattr(obj, "_asdict")
and hasattr(obj, "_replace")
and hasattr(obj, "_fields")
and hasattr(obj, "_field_defaults")
)
def get_default_shuffle_method() -> str:
if d := config.get("dataframe.shuffle.method", None):
return d
try:
from distributed import default_client
default_client()
except (ImportError, ValueError):
return "disk"
try:
from distributed.shuffle import check_minimal_arrow_version
check_minimal_arrow_version()
except ModuleNotFoundError:
return "tasks"
return "p2p"
def get_meta_library(like):
if hasattr(like, "_meta"):
like = like._meta
return import_module(typename(like).partition(".")[0])
| _HashIdWrapper |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/array_ops_test.py | {
"start": 1237,
"end": 5757
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
global_ids = test_util.create_device_ids_array((2, 1))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = { # pylint: disable=g-complex-comprehension
device: layout_lib.Mesh(
_MESH_DIMS,
global_ids,
local_ids,
test_util.create_device_list((2, 1), device),
)
for device in ('CPU', 'GPU', 'TPU')
}
self.mesh = self.configTestMesh(mesh_dict)
@combinations.generate(
combinations.combine(is_graph=[False, True], size=[32, 4096])
)
def testTwoFills(self, is_graph, size):
layout_x = Layout.batch_sharded(self.mesh, _MESH_DIM_X, rank=1)
layout_y = Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=1)
def fn():
return (
array_ops.fill([size], 0.0, layout=layout_x),
array_ops.fill([size], 0.0, layout=layout_y),
)
if is_graph:
fn = polymorphic_function.function(fn)
with api.default_mesh(self.mesh):
dtensor_x, dtensor_y = fn()
tensor = array_ops.zeros([size], layout=None)
self.assertDTensorEqual(tensor, layout_x, dtensor_x)
self.assertDTensorEqual(tensor, layout_y, dtensor_y)
@combinations.generate(
combinations.combine(
is_graph=[False, True],
size=[32, 4096],
nullary_op=[array_ops.zeros, array_ops.ones],
)
)
def testNullaryOp(self, is_graph, size, nullary_op):
layout_y = Layout.batch_sharded(self.mesh, _MESH_DIM_Y, rank=1)
tensor = nullary_op([size], layout=None)
def fn():
return nullary_op([size], layout=layout_y)
if is_graph:
fn = polymorphic_function.function(fn)
with api.default_mesh(self.mesh):
dtensor = fn()
self.assertDTensorEqual(tensor, layout_y, dtensor)
@combinations.generate(
combinations.combine(
is_graph=[False, True],
size=[32, 4096],
nullary_op=[array_ops.zeros_like_v2, array_ops.ones_like_v2],
)
)
def testNullaryLikeOpWithLayout(self, is_graph, size, nullary_op):
layout_x = Layout.batch_sharded(self.mesh, batch_dim=_MESH_DIM_X, rank=1)
layout_y = Layout.batch_sharded(self.mesh, batch_dim=_MESH_DIM_Y, rank=1)
tensor = array_ops.zeros([size], layout=None)
tensor_like = nullary_op(tensor, layout=None)
dtensor = array_ops.zeros([size], layout=layout_x)
self.assertDTensorEqual(tensor, layout_x, dtensor)
def fn(layout):
return nullary_op(dtensor, layout=layout)
if is_graph:
fn = polymorphic_function.function(fn)
with api.default_mesh(self.mesh):
dtensor_like = fn(layout_y)
self.assertDTensorEqual(tensor_like, layout_y, dtensor_like)
@combinations.generate(
combinations.combine(
is_graph=[True],
size=[32, 4096],
nullary_op=[array_ops.zeros_like_v2, array_ops.ones_like_v2],
)
)
def testNullaryLikeOpWithoutLayoutEager(self, is_graph, size, nullary_op):
layout_x = Layout.batch_sharded(self.mesh, batch_dim=_MESH_DIM_X, rank=1)
layout_replicated = Layout.replicated(self.mesh, rank=1)
tensor = array_ops.zeros([size], layout=None)
tensor_like = nullary_op(tensor, layout=None)
dtensor = array_ops.zeros([size], layout=layout_x)
self.assertDTensorEqual(tensor, layout_x, dtensor)
def fn(layout):
return nullary_op(dtensor, layout=layout)
if is_graph:
fn = polymorphic_function.function(fn)
with api.default_mesh(self.mesh):
dtensor_like = fn(None)
self.assertDTensorEqual(tensor_like, layout_replicated, dtensor_like)
@combinations.generate(
combinations.combine(
is_graph=[False],
size=[32, 4096],
nullary_op=[array_ops.zeros_like_v2, array_ops.ones_like_v2],
)
)
def testNullaryLikeOpWithoutLayoutGraph(self, is_graph, size, nullary_op):
layout_x = Layout.batch_sharded(self.mesh, batch_dim=_MESH_DIM_X, rank=1)
tensor = array_ops.zeros([size], layout=None)
tensor_like = nullary_op(tensor, layout=None)
dtensor = array_ops.zeros([size], layout=layout_x)
self.assertDTensorEqual(tensor, layout_x, dtensor)
def fn(layout):
return nullary_op(dtensor, layout=layout)
if is_graph:
fn = polymorphic_function.function(fn)
with api.default_mesh(self.mesh):
dtensor_like = fn(None)
self.assertDTensorEqual(tensor_like, layout_x, dtensor_like)
if __name__ == '__main__':
test.main()
| ArrayOpsTest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 5002,
"end": 5071
} | class ____(list):
def __repr__(self):
return "bye"
| ReprList |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/utils/test_utils.py | {
"start": 100955,
"end": 103059
} | class ____:
@pytest.mark.db_test
def test_task_with_timestamps_defined(self, dag_maker):
"""Test task instance with defined start_date and end_date."""
with dag_maker(dag_id="test_dag"):
BaseOperator(task_id="test_task")
dag_run = dag_maker.create_dagrun()
ti = dag_run.get_task_instance(task_id="test_task")
# Set valid timestamps
start_time = pendulum.parse("2024-01-01T10:00:00Z")
end_time = pendulum.parse("2024-01-01T10:02:30Z") # 150 seconds difference
ti.start_date = start_time
ti.end_date = end_time
ti.state = TaskInstanceState.SUCCESS
ti.duration = None
# Persist changes to database
with create_session() as session:
session.merge(ti)
session.commit()
result = get_airflow_state_run_facet(
dag_id="test_dag",
run_id=dag_run.run_id,
task_ids=["test_task"],
dag_run_state=DagRunState.SUCCESS,
)
assert result["airflowState"].tasksDuration["test_task"] == 150.0
@pytest.mark.db_test
def test_task_with_none_timestamps_fallback_to_zero(self, dag_maker):
"""Test task with None timestamps falls back to 0.0."""
with dag_maker(dag_id="test_dag"):
BaseOperator(task_id="terminated_task")
dag_run = dag_maker.create_dagrun()
ti = dag_run.get_task_instance(task_id="terminated_task")
# Set None timestamps (signal-terminated case)
ti.start_date = None
ti.end_date = None
ti.state = TaskInstanceState.SKIPPED
ti.duration = None
# Persist changes to database
with create_session() as session:
session.merge(ti)
session.commit()
result = get_airflow_state_run_facet(
dag_id="test_dag",
run_id=dag_run.run_id,
task_ids=["terminated_task"],
dag_run_state=DagRunState.FAILED,
)
assert result["airflowState"].tasksDuration["terminated_task"] == 0.0
| TestGetAirflowStateRunFacet |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchMapping1.py | {
"start": 3442,
"end": 3515
} | class ____(TypedDict):
type: Literal["Str"]
str_value: str
| StrValue |
python | pandas-dev__pandas | asv_bench/benchmarks/timeseries.py | {
"start": 3263,
"end": 3490
} | class ____:
def setup(self):
N = 100000
self.rng = date_range(start="1/1/2000", periods=N, freq="min")
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
| TimeDatetimeConverter |
python | yandexdataschool__Practical_RL | week06_policy_based/atari_wrappers.py | {
"start": 12133,
"end": 13919
} | class ____:
def __init__(self, i, env_id, **kwargs):
self.env_id = env_id
self.i = i
self.kwargs = kwargs
def __call__(self):
return nature_dqn_env(
self.env_id,
summaries=False,
clip_reward=False,
**self.kwargs,
)
def nature_dqn_env(env_id, nenvs=None, seed=None, summaries="Numpy", clip_reward=True):
"""Wraps env as in Nature DQN paper."""
if "NoFrameskip" not in env_id:
raise ValueError(f"env_id must have 'NoFrameskip' but is {env_id}")
if nenvs is not None:
if seed is None:
seed = list(range(nenvs))
if isinstance(seed, int):
seed = [seed] * nenvs
if len(seed) != nenvs:
raise ValueError(
f"seed has length {len(seed)} but must have "
f"length equal to nenvs which is {nenvs}"
)
thunks = [_thunk(i, env_id) for i in range(nenvs)]
env = ParallelEnvBatch(make_env=thunks, seeds=seed)
if summaries:
summaries_class = get_summaries_class(summaries)
env = summaries_class(env, prefix=env_id)
if clip_reward:
env = ClipReward(env)
return env
env = gym.make(env_id, render_mode="rgb_array")
if summaries:
env = TensorboardSummaries(env)
env = EpisodicLife(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireReset(env)
env = StartWithRandomActions(env, max_random_actions=30)
env = MaxBetweenFrames(env)
env = SkipFrames(env, 4)
env = ImagePreprocessing(env, width=84, height=84, grayscale=True)
env = QueueFrames(env, 4)
env = SwapImageAxes(env)
if clip_reward:
env = ClipReward(env)
return env
| _thunk |
python | keon__algorithms | algorithms/graph/graph.py | {
"start": 1064,
"end": 1542
} | class ____:
"""
A directed edge in a directed graph.
Stores the source and target node of the edge.
"""
def __init__(self, node_from, node_to):
self.source = node_from
self.target = node_to
def __eq__(self, obj):
if isinstance(obj, DirectedEdge):
return obj.source == self.source and obj.target == self.target
return False
def __repr__(self):
return f"({self.source} -> {self.target})"
| DirectedEdge |
python | ray-project__ray | python/ray/util/collective/types.py | {
"start": 3481,
"end": 3550
} | class ____:
timeout_ms = unset_timeout_ms
@dataclass
| BarrierOptions |
python | ansible__ansible | lib/ansible/modules/service.py | {
"start": 49613,
"end": 54413
} | class ____(Service):
"""
This is the SunOS Service manipulation class - it uses the svcadm
command for controlling services, and svcs command for checking status.
It also tries to be smart about taking the service out of maintenance
state if necessary.
"""
platform = 'SunOS'
distribution = None
def get_service_tools(self):
self.svcs_cmd = self.module.get_bin_path('svcs', True)
if not self.svcs_cmd:
self.module.fail_json(msg='unable to find svcs binary')
self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
if not self.svcadm_cmd:
self.module.fail_json(msg='unable to find svcadm binary')
if self.svcadm_supports_sync():
self.svcadm_sync = '-s'
else:
self.svcadm_sync = ''
def svcadm_supports_sync(self):
# Support for synchronous restart/refresh is only supported on
# Oracle Solaris >= 11.2
for line in open('/etc/release', 'r').readlines():
m = re.match(r'\s+Oracle Solaris (\d+)\.(\d+).*', line.rstrip())
if m and m.groups() >= ('11', '2'):
return True
def get_service_status(self):
status = self.get_sunos_svcs_status()
# Only 'online' is considered properly running. Everything else is off
# or has some sort of problem.
if status == 'online':
self.running = True
else:
self.running = False
def get_sunos_svcs_status(self):
rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
lines = stdout.rstrip("\n").split("\n")
status = lines[-1].split(" ")[0]
# status is one of: online, offline, degraded, disabled, maintenance, uninitialized
# see man svcs(1)
return status
def service_enable(self):
# Get current service enablement status
rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
enabled = False
temporary = False
# look for enabled line, which could be one of:
# enabled true (temporary)
# enabled false (temporary)
# enabled true
# enabled false
for line in stdout.split("\n"):
if line.startswith("enabled"):
if "true" in line:
enabled = True
if "temporary" in line:
temporary = True
startup_enabled = (enabled and not temporary) or (not enabled and temporary)
if self.enable and startup_enabled:
return
elif (not self.enable) and (not startup_enabled):
return
if not self.module.check_mode:
# Mark service as started or stopped (this will have the side effect of
# actually stopping or starting the service)
if self.enable:
subcmd = "enable -rs"
else:
subcmd = "disable -s"
rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
self.changed = True
def service_control(self):
status = self.get_sunos_svcs_status()
# if starting or reloading, clear maintenance states
if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
if rc != 0:
return rc, stdout, stderr
status = self.get_sunos_svcs_status()
if status in ['maintenance', 'degraded']:
self.module.fail_json(msg="Failed to bring service out of %s status." % status)
if self.action == 'start':
subcmd = "enable -rst"
elif self.action == 'stop':
subcmd = "disable -st"
elif self.action == 'reload':
subcmd = "refresh %s" % (self.svcadm_sync)
elif self.action == 'restart' and status == 'online':
subcmd = "restart %s" % (self.svcadm_sync)
elif self.action == 'restart' and status != 'online':
subcmd = "enable -rst"
return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
| SunOSService |
python | django__django | tests/auth_tests/test_forms.py | {
"start": 1565,
"end": 2344
} | class ____:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(
username="testclient", password="password", email="testclient@example.com"
)
cls.u2 = User.objects.create_user(
username="inactive", password="password", is_active=False
)
cls.u3 = User.objects.create_user(username="staff", password="password")
cls.u4 = User.objects.create(username="empty_password", password="")
cls.u5 = User.objects.create(username="unmanageable_password", password="$")
cls.u6 = User.objects.create(username="unknown_password", password="foo$bar")
cls.u7 = User.objects.create(
username="unusable_password", password=make_password(None)
)
| TestDataMixin |
python | encode__django-rest-framework | rest_framework/mixins.py | {
"start": 1458,
"end": 1712
} | class ____:
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
| RetrieveModelMixin |
python | bokeh__bokeh | src/bokeh/client/states.py | {
"start": 1928,
"end": 2058
} | class ____(metaclass=ABCMeta):
@abstractmethod
async def run(self, connection: ClientConnection) -> None:
pass
| State |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_restore.py | {
"start": 1471,
"end": 2118
} | class ____(Callback):
def _check_properties(self, trainer, pl_module):
assert trainer.global_step == pl_module.global_step
assert trainer.current_epoch == pl_module.current_epoch
def on_train_start(self, trainer, pl_module):
self._check_properties(trainer, pl_module)
def on_train_batch_start(self, trainer, pl_module, *_):
self._check_properties(trainer, pl_module)
def on_train_batch_end(self, trainer, pl_module, *_):
self._check_properties(trainer, pl_module)
def on_train_end(self, trainer, pl_module):
self._check_properties(trainer, pl_module)
| ModelTrainerPropertyParity |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_task_runs.py | {
"start": 643,
"end": 7429
} | class ____:
async def test_create_task_run(self, flow_run, client, session):
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "0",
"labels": {"env": "dev"},
}
response = await client.post("/task_runs/", json=task_run_data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["flow_run_id"] == str(flow_run.id)
assert response.json()["id"]
assert response.json()["name"] == "my-cool-task-run-name"
assert response.json()["labels"] == {
"env": "dev",
"prefect.flow.id": str(flow_run.flow_id),
"prefect.flow-run.id": str(flow_run.id),
}
task_run = await models.task_runs.read_task_run(
session=session, task_run_id=response.json()["id"]
)
assert task_run
assert task_run.flow_run_id == flow_run.id
async def test_create_task_run_gracefully_upserts(self, flow_run, client):
# create a task run
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"dynamic_key": "my-dynamic-key",
}
task_run_response = await client.post("/task_runs/", json=task_run_data)
# recreate the same task run, ensure graceful upsert
response = await client.post("/task_runs/", json=task_run_data)
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == task_run_response.json()["id"]
async def test_create_task_run_without_flow_run_id(self, flow_run, client, session):
task_run_data = {
"flow_run_id": None,
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "0",
}
response = await client.post("/task_runs/", json=task_run_data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["flow_run_id"] is None
assert response.json()["id"]
assert response.json()["name"] == "my-cool-task-run-name"
task_run = await models.task_runs.read_task_run(
session=session, task_run_id=response.json()["id"]
)
assert task_run.flow_run_id is None
# Posting the same data twice should result in an upsert
response_2 = await client.post("/task_runs/", json=task_run_data)
assert response_2.status_code == status.HTTP_200_OK
assert response.json()["id"] == response_2.json()["id"]
async def test_create_task_run_without_state(self, flow_run, client, session):
task_run_data = dict(
flow_run_id=str(flow_run.id), task_key="task-key", dynamic_key="0"
)
response = await client.post("/task_runs/", json=task_run_data)
task_run = await models.task_runs.read_task_run(
session=session, task_run_id=response.json()["id"]
)
assert str(task_run.id) == response.json()["id"]
assert task_run.state.type == states.StateType.PENDING
async def test_create_task_run_with_state(self, flow_run, client, session):
task_run_data = schemas.actions.TaskRunCreate(
flow_run_id=flow_run.id,
task_key="task-key",
state=schemas.actions.StateCreate(type=schemas.states.StateType.RUNNING),
dynamic_key="0",
)
response = await client.post(
"/task_runs/", json=task_run_data.model_dump(mode="json")
)
task_run = await models.task_runs.read_task_run(
session=session, task_run_id=response.json()["id"]
)
assert str(task_run.id) == response.json()["id"]
assert task_run.state.type == task_run_data.state.type
async def test_raises_on_retry_delay_validation(self, flow_run, client, session):
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "0",
"empirical_policy": {"retries": 3, "retry_delay": list(range(100))},
}
response = await client.post("/task_runs/", json=task_run_data)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
assert (
"Can not configure more than 50 retry delays per task."
in response.json()["exception_detail"][0]["msg"]
)
async def test_raises_on_jitter_factor_validation(self, flow_run, client, session):
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "0",
"empirical_policy": {"retries": 3, "retry_jitter_factor": -100},
}
response = await client.post("/task_runs/", json=task_run_data)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
assert (
"`retry_jitter_factor` must be >= 0."
in response.json()["exception_detail"][0]["msg"]
)
async def test_create_task_run_with_client_provided_id(self, flow_run, client):
client_provided_id = uuid.uuid4()
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "0",
"id": str(client_provided_id),
}
response = await client.post(
"/task_runs/",
json=task_run_data,
)
assert response.status_code == 201
assert response.json()["id"] == str(client_provided_id)
async def test_create_task_run_with_same_client_provided_id(
self,
flow_run,
client,
):
client_provided_id = uuid.uuid4()
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "0",
"id": str(client_provided_id),
}
response = await client.post(
"/task_runs/",
json=task_run_data,
)
assert response.status_code == 201
assert response.json()["id"] == str(client_provided_id)
task_run_data = {
"flow_run_id": str(flow_run.id),
"task_key": "my-task-key",
"name": "my-cool-task-run-name",
"dynamic_key": "1",
"id": str(client_provided_id),
}
response = await client.post(
"/task_runs/",
json=task_run_data,
)
assert response.status_code == 409
| TestCreateTaskRun |
python | crytic__slither | slither/detectors/statements/incorrect_using_for.py | {
"start": 5598,
"end": 8468
} | class ____(AbstractDetector):
"""
Detector for incorrect using-for statement usage.
"""
ARGUMENT = "incorrect-using-for"
HELP = "Detects using-for statement usage when no function from a given library matches a given type"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-usage-of-using-for-statement"
WIKI_TITLE = "Incorrect usage of using-for statement"
WIKI_DESCRIPTION = (
"In Solidity, it is possible to use libraries for certain types, by the `using-for` statement "
"(`using <library> for <type>`). However, the Solidity compiler doesn't check whether a given "
"library has at least one function matching a given type. If it doesn't, such a statement has "
"no effect and may be confusing. "
)
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
library L {
function f(bool) public pure {}
}
using L for uint;
```
Such a code will compile despite the fact that `L` has no function with `uint` as its first argument."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = (
"Make sure that the libraries used in `using-for` statements have at least one function "
"matching a type used in these statements. "
)
def _append_result(
self, results: List[Output], uf: UsingForTopLevel, type_: Type, library: Contract
) -> None:
info: DETECTOR_INFO = [
f"using-for statement at {uf.source_mapping} is incorrect - no matching function for {type_} found in ",
library,
".\n",
]
res = self.generate_result(info)
results.append(res)
def _detect(self) -> List[Output]:
results: List[Output] = []
for uf in self.compilation_unit.using_for_top_level:
# UsingForTopLevel.using_for is a dict with a single entry, which is mapped to a list of functions/libraries
# the following code extracts the type from using-for and skips using-for statements with functions
type_ = list(uf.using_for.keys())[0]
for lib_or_fcn in uf.using_for[type_]:
# checking for using-for with functions is already performed by the compiler; we only consider libraries
if isinstance(lib_or_fcn, UserDefinedType):
lib_or_fcn_type = lib_or_fcn.type
if (
isinstance(type_, Type)
and isinstance(lib_or_fcn_type, Contract)
and not _is_correctly_used(type_, lib_or_fcn_type)
):
self._append_result(results, uf, type_, lib_or_fcn_type)
return results
| IncorrectUsingFor |
python | pytorch__pytorch | torch/_functorch/partitioners.py | {
"start": 2205,
"end": 3019
} | class ____:
"""Class for keeping track of different operator categories"""
fusible_ops: OrderedSet[Callable]
compute_intensive_ops: OrderedSet[Callable]
random_ops: OrderedSet[Callable]
view_ops: OrderedSet[Callable]
recomputable_ops: OrderedSet[Callable]
def is_fusible(self, node: fx.Node):
return get_aten_target(node) in self.fusible_ops
def is_compute_intensive(self, node: fx.Node):
return get_aten_target(node) in self.compute_intensive_ops
def is_random(self, node: fx.Node):
return get_aten_target(node) in self.random_ops
def is_view(self, node: fx.Node):
return get_aten_target(node) in self.view_ops
def is_recomputable(self, node: fx.Node):
return get_aten_target(node) in self.recomputable_ops
@dataclass
| OpTypes |
python | FactoryBoy__factory_boy | tests/test_transformer.py | {
"start": 6012,
"end": 6894
} | class ____(TestCase):
def test_traits_off(self):
instance = WithTraitFactory()
self.assertEqual(instance.one, "ONE")
self.assertIsNone(instance.two)
self.assertIsNone(instance.three)
def test_trait_transform_applies(self):
"""A trait-provided transformer should apply to existing values"""
instance = WithTraitFactory(upper_two=True)
self.assertEqual(instance.one, "ONE")
self.assertEqual(instance.two, "TWO")
self.assertIsNone(instance.three)
def test_trait_transform_applies_supplied(self):
"""A trait-provided transformer should be overridden by caller-provided values"""
instance = WithTraitFactory(upper_two=True, two="two")
self.assertEqual(instance.one, "ONE")
self.assertEqual(instance.two, "two")
self.assertIsNone(instance.three)
| TransformerTraitTest |
python | astropy__astropy | astropy/coordinates/attributes.py | {
"start": 17631,
"end": 19903
} | class ____(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
doc : str
Description of the frame attribute for help and documentation
"""
def __init__(
self, default=None, allowed_classes=None, secondary_attribute="", **kwargs
):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute, **kwargs)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError(
"Tried to set a DifferentialAttribute with an unsupported"
f" Differential type {value.__class__}. Allowed classes are:"
f" {self.allowed_classes}"
)
return value, True
| DifferentialAttribute |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/common.py | {
"start": 644,
"end": 1151
} | class ____(CPython, PosixSupports, ABC):
"""Create a CPython virtual environment on POSIX platforms."""
@classmethod
def _executables(cls, interpreter):
host_exe = Path(interpreter.system_executable)
major, minor = interpreter.version_info.major, interpreter.version_info.minor
targets = OrderedDict((i, None) for i in ["python", f"python{major}", f"python{major}.{minor}", host_exe.name])
yield host_exe, list(targets.keys()), RefMust.NA, RefWhen.ANY
| CPythonPosix |
python | kamyu104__LeetCode-Solutions | Python/find-the-width-of-columns-of-a-grid.py | {
"start": 537,
"end": 853
} | class ____(object):
def findColumnWidth(self, grid):
"""
:type grid: List[List[int]]
:rtype: List[int]
"""
return [max(len(str(grid[i][j])) for i in xrange(len(grid))) for j in xrange(len(grid[0]))]
# Time: O(m * n)
# Space: O(m + logr)
import itertools
# array
| Solution2 |
python | pypa__pipenv | pipenv/patched/pip/_internal/utils/logging.py | {
"start": 7477,
"end": 12286
} | class ____(Filter):
"""
A logging Filter that excludes records from a logger (or its children).
"""
def filter(self, record: logging.LogRecord) -> bool:
# The base Filter class allows only records from a logger (or its
# children).
return not super().filter(record)
def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int:
"""Configures and sets up all of the logging
Returns the requested logging level, as its integer value.
"""
# Determine the level to be logging at.
if verbosity >= 2:
level_number = logging.DEBUG
elif verbosity == 1:
level_number = VERBOSE
elif verbosity == -1:
level_number = logging.WARNING
elif verbosity == -2:
level_number = logging.ERROR
elif verbosity <= -3:
level_number = logging.CRITICAL
else:
level_number = logging.INFO
level = logging.getLevelName(level_number)
# The "root" logger should match the "console" level *unless* we also need
# to log to a user log file.
include_user_log = user_log_file is not None
if include_user_log:
additional_log_file = user_log_file
root_level = "DEBUG"
else:
additional_log_file = "/dev/null"
root_level = level
# Disable any logging besides WARNING unless we have DEBUG level logging
# enabled for vendored libraries.
vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
# Shorthands for clarity
handler_classes = {
"stream": "pipenv.patched.pip._internal.utils.logging.RichPipStreamHandler",
"file": "pipenv.patched.pip._internal.utils.logging.BetterRotatingFileHandler",
}
handlers = ["console", "console_errors", "console_subprocess"] + (
["user_log"] if include_user_log else []
)
global _stdout_console, stderr_console
_stdout_console = PipConsole(file=sys.stdout, no_color=no_color, soft_wrap=True)
_stderr_console = PipConsole(file=sys.stderr, no_color=no_color, soft_wrap=True)
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"filters": {
"exclude_warnings": {
"()": "pipenv.patched.pip._internal.utils.logging.MaxLevelFilter",
"level": logging.WARNING,
},
"restrict_to_subprocess": {
"()": "logging.Filter",
"name": subprocess_logger.name,
},
"exclude_subprocess": {
"()": "pipenv.patched.pip._internal.utils.logging.ExcludeLoggerFilter",
"name": subprocess_logger.name,
},
},
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": "%(message)s",
},
"indent_with_timestamp": {
"()": IndentingFormatter,
"format": "%(message)s",
"add_timestamp": True,
},
},
"handlers": {
"console": {
"level": level,
"class": handler_classes["stream"],
"console": _stdout_console,
"filters": ["exclude_subprocess", "exclude_warnings"],
"formatter": "indent",
},
"console_errors": {
"level": "WARNING",
"class": handler_classes["stream"],
"console": _stderr_console,
"filters": ["exclude_subprocess"],
"formatter": "indent",
},
# A handler responsible for logging to the console messages
# from the "subprocessor" logger.
"console_subprocess": {
"level": level,
"class": handler_classes["stream"],
"console": _stderr_console,
"filters": ["restrict_to_subprocess"],
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": handler_classes["file"],
"filename": additional_log_file,
"encoding": "utf-8",
"delay": True,
"formatter": "indent_with_timestamp",
},
},
"root": {
"level": root_level,
"handlers": handlers,
},
"loggers": {"pipenv.patched.pip._vendor": {"level": vendored_log_level}},
}
)
return level_number
| ExcludeLoggerFilter |
python | plotly__plotly.py | plotly/graph_objs/funnelarea/_title.py | {
"start": 233,
"end": 3635
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnelarea"
_path_str = "funnelarea.title"
_valid_props = {"font", "position", "text"}
@property
def font(self):
"""
Sets the font used for `title`.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.funnelarea.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def position(self):
"""
Specifies the location of the `title`.
The 'position' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right']
Returns
-------
Any
"""
return self["position"]
@position.setter
def position(self, val):
self["position"] = val
@property
def text(self):
"""
Sets the title of the chart. If it is empty, no title is
displayed.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets the font used for `title`.
position
Specifies the location of the `title`.
text
Sets the title of the chart. If it is empty, no title
is displayed.
"""
def __init__(self, arg=None, font=None, position=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnelarea.Title`
font
Sets the font used for `title`.
position
Specifies the location of the `title`.
text
Sets the title of the chart. If it is empty, no title
is displayed.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnelarea.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("position", arg, position)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/session.py | {
"start": 2878,
"end": 6413
} | class ____:
"""Mixin class which provides an awaitable accessor for all attributes.
E.g.::
from __future__ import annotations
from typing import List
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy.ext.asyncio import AsyncAttrs
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
class Base(AsyncAttrs, DeclarativeBase):
pass
class A(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
bs: Mapped[List[B]] = relationship()
class B(Base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
data: Mapped[str]
In the above example, the :class:`_asyncio.AsyncAttrs` mixin is applied to
the declarative ``Base`` class where it takes effect for all subclasses.
This mixin adds a single new attribute
:attr:`_asyncio.AsyncAttrs.awaitable_attrs` to all classes, which will
yield the value of any attribute as an awaitable. This allows attributes
which may be subject to lazy loading or deferred / unexpiry loading to be
accessed such that IO can still be emitted::
a1 = (await async_session.scalars(select(A).where(A.id == 5))).one()
# use the lazy loader on ``a1.bs`` via the ``.awaitable_attrs``
# interface, so that it may be awaited
for b1 in await a1.awaitable_attrs.bs:
print(b1)
The :attr:`_asyncio.AsyncAttrs.awaitable_attrs` performs a call against the
attribute that is approximately equivalent to using the
:meth:`_asyncio.AsyncSession.run_sync` method, e.g.::
for b1 in await async_session.run_sync(lambda sess: a1.bs):
print(b1)
.. versionadded:: 2.0.13
.. seealso::
:ref:`asyncio_orm_avoid_lazyloads`
"""
class _AsyncAttrGetitem:
__slots__ = "_instance"
def __init__(self, _instance: Any):
self._instance = _instance
def __getattr__(self, name: str) -> Awaitable[Any]:
return greenlet_spawn(getattr, self._instance, name)
@property
def awaitable_attrs(self) -> AsyncAttrs._AsyncAttrGetitem:
"""provide a namespace of all attributes on this object wrapped
as awaitables.
e.g.::
a1 = (await async_session.scalars(select(A).where(A.id == 5))).one()
some_attribute = await a1.awaitable_attrs.some_deferred_attribute
some_collection = await a1.awaitable_attrs.some_collection
""" # noqa: E501
return AsyncAttrs._AsyncAttrGetitem(self)
@util.create_proxy_methods(
Session,
":class:`_orm.Session`",
":class:`_asyncio.AsyncSession`",
classmethods=["object_session", "identity_key"],
methods=[
"__contains__",
"__iter__",
"add",
"add_all",
"expire",
"expire_all",
"expunge",
"expunge_all",
"is_modified",
"in_transaction",
"in_nested_transaction",
],
attributes=[
"dirty",
"deleted",
"new",
"identity_map",
"is_active",
"autoflush",
"no_autoflush",
"info",
"execution_options",
],
)
| AsyncAttrs |
python | django__django | tests/template_tests/syntax_tests/i18n/test_get_current_language.py | {
"start": 129,
"end": 588
} | class ____(SimpleTestCase):
libraries = {"i18n": "django.templatetags.i18n"}
@setup({"template": "{% load i18n %} {% get_current_language %}"})
def test_no_as_var(self):
msg = (
"'get_current_language' requires 'as variable' (got "
"['get_current_language'])"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("template")
| I18nGetCurrentLanguageTagTests |
python | mlflow__mlflow | mlflow/models/dependencies_schemas.py | {
"start": 9703,
"end": 10254
} | class ____:
retriever_schemas: list[RetrieverSchema] = field(default_factory=list)
def to_dict(self) -> dict[str, dict[DependenciesSchemasType, list[dict[str, Any]]]]:
if not self.retriever_schemas:
return None
return {
"dependencies_schemas": {
DependenciesSchemasType.RETRIEVERS.value: [
index.to_dict()[DependenciesSchemasType.RETRIEVERS.value][0]
for index in self.retriever_schemas
],
}
}
| DependenciesSchemas |
python | huggingface__transformers | src/transformers/modeling_utils.py | {
"start": 44908,
"end": 231747
} | class ____(nn.Module, EmbeddingAccessMixin, ModuleUtilsMixin, PushToHubMixin, PeftAdapterMixin):
r"""
Base class for all models.
[`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
downloading and saving models as well as a few methods common to all models to:
- resize the input embeddings
Class attributes (overridden by derived classes):
- **config_class** ([`PreTrainedConfig`]) -- A subclass of [`PreTrainedConfig`] to use as configuration class
for this model architecture.
- **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
classes of the same architecture adding modules on top of the base model.
- **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
models, `pixel_values` for vision models and `input_values` for speech models).
- **can_record_outputs** (dict):
"""
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
model_tags = None
_checkpoint_conversion_mapping = {} # used for BC support in VLMs, not meant to be used by new models
_auto_class = None
_no_split_modules = None
_skip_keys_device_placement = None
_keep_in_fp32_modules = None
# the _keep_in_fp32_modules will avoid casting to anything other than float32, except bfloat16
# to also prevent bfloat16 casting, use the _keep_in_fp32_modules_strict flag
_keep_in_fp32_modules_strict = None
dtype_plan: Optional[dict[str, torch.dtype]] = None
# a list of `re` patterns of `state_dict` keys that should be removed from the list of missing
# keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings.
_keys_to_ignore_on_load_missing = None
# a list of `re` patterns of `state_dict` keys that should be removed from the list of
# unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary
# warnings.
_keys_to_ignore_on_load_unexpected = None
# a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't
# trained, but which are either deterministic or tied variables)
_keys_to_ignore_on_save = None
# a list of `state_dict` keys that are potentially tied to another key in the state_dict.
_tied_weights_keys = None
supports_gradient_checkpointing = False
_is_stateful = False
# Flash Attention support
_supports_flash_attn = False
# SDPA support
_supports_sdpa = False
# Flex Attention support
_supports_flex_attn = False
_can_compile_fullgraph = False
# A tensor parallel plan to be applied to the model when TP is enabled. For
# top-level models, this attribute is currently defined in respective model
# code. For base models, this attribute comes from
# `config.base_model_tp_plan` during `__init__`.
# It should identify the layers exactly: if you want to TP model.language_model.layers.fc1
# by passing `tp_plan` to the init, it should be {"model.language_model.layers.fc1":"colwise"}
# for example.
_tp_plan = None
# tensor parallel degree to which model is sharded to.
_tp_size = None
# A pipeline parallel plan specifying the layers which may not be present
# on all ranks when PP is enabled. For top-level models, this attribute is
# currently defined in respective model code. For base models, this
# attribute comes from `config.base_model_pp_plan` during `post_init`.
#
# The variable names for the inputs and outputs of the specified layers can
# be indexed using the `PipelineParallel` enum as follows:
# - `_pp_plan["layers"][PipelineParallel.inputs]`
# - `_pp_plan["layers"][PipelineParallel.outputs]`
_pp_plan = None
# This flag signal that the model can be used as an efficient backend in TGI and vLLM
# In practice, it means that they support attention (mask) interface functions, fully pass the kwargs
# through all modules up to the Attention layer, can slice logits with Tensor, and have a default TP plan
_supports_attention_backend = False
_can_record_outputs = None
# Attributes used mainly in multimodal LLMs, though all models contain a valid field for these
# Possible values are: text, image, video, audio and time
input_modalities: Union[str, list[str]] = "text" # most models are text
@property
@torch._dynamo.allow_in_graph
def can_record_outputs(self) -> dict[str, OutputRecorder]:
"""
Maps output names (e.g., "attentions", "hidden_states")
to either:
- A module class (e.g., `LlamaDecoderLayer`), using default index conventions:
* index=0 for "hidden_states"
* index=1 for "attentions"
- Or an `OutputRecorder(...)` with `target_class`, optional `index`, and `layer_name`.
Examples:
These two are equivalent:
```python
_can_record_outputs = {
"attentions": LlamaAttention,
"hidden_states": LlamaDecoderLayer
}
_can_record_outputs = {
"attentions": OutputRecorder(LlamaAttention, index=1),
"hidden_states": OutputRecorder(LlamaDecoderLayer, index=0)
}
```
This means you can record outputs from the same class, by specifying a layer name. Before
collecting outputs, we check that they come from this layer.
If you have cross attention that come from `LlamaAttention` and self attention that also
come from `LlamaAttention` but from `self_attn` you can do this:
```python
class LlamaModel(PreTrainedModel):
_can_record_outputs = {
"attentions": OutputRecorder(LlamaAttention, index=1, layer-name="self_attn"),
"cross_attentions": OutputRecorder(LlamaAttention, index=1, layer_name="cross_attn")
}
```
"""
return self._can_record_outputs or {}
@property
def dummy_inputs(self) -> dict[str, torch.Tensor]:
"""
`dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# For BC we keep the original `config_class` definition in case
# there is a `config_class` attribute (e.g. remote code models),
# otherwise we derive it from the annotated `config` attribute.
# defined in this particular subclass
child_annotation = cls.__dict__.get("__annotations__", {}).get("config", None)
child_attribute = cls.__dict__.get("config_class", None)
# defined in the class (this subclass or any parent class)
full_annotation = get_type_hints(cls).get("config", None)
full_attribute = cls.config_class
# priority (child class_config -> child annotation -> global class_config -> global annotation)
if child_attribute is not None:
cls.config_class = child_attribute
elif child_annotation is not None:
cls.config_class = child_annotation
elif full_attribute is not None:
cls.config_class = full_attribute
elif full_annotation is not None:
cls.config_class = full_annotation
def __init__(self, config: PreTrainedConfig, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PreTrainedConfig):
raise TypeError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PreTrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
self.config = config
# Check the attention implementation is supported, or set it if not yet set (on the internal attr, to avoid
# setting it recursively)
self.config._attn_implementation_internal = self._check_and_adjust_attn_implementation(
self.config._attn_implementation, is_init_check=True
)
if self.can_generate():
self.generation_config = GenerationConfig.from_model_config(config)
# for initialization of the loss
loss_type = self.__class__.__name__
if loss_type not in LOSS_MAPPING:
loss_groups = f"({'|'.join(LOSS_MAPPING)})"
loss_type = re.findall(loss_groups, self.__class__.__name__)
if len(loss_type) > 0:
loss_type = loss_type[0]
else:
loss_type = None
self.loss_type = loss_type
self.name_or_path = config.name_or_path
self.warnings_issued = {}
# Overwrite the class attribute to make it an instance attribute, so models like
# `InstructBlipForConditionalGeneration` can dynamically update it without modifying the class attribute
# when a different component (e.g. language_model) is used.
self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules)
self._keep_in_fp32_modules_strict = copy.copy(self.__class__._keep_in_fp32_modules_strict)
self.dtype_plan = {}
if isinstance(self._keep_in_fp32_modules, list):
self.dtype_plan.update(dict.fromkeys(self._keep_in_fp32_modules, torch.float32))
if isinstance(self._keep_in_fp32_modules_strict, list):
self.dtype_plan.update(dict.fromkeys(self._keep_in_fp32_modules_strict, torch.float32))
self._no_split_modules = self._no_split_modules or []
_CAN_RECORD_REGISTRY[str(self.__class__)] = self._can_record_outputs # added for executorch support only
def post_init(self):
"""
A method executed at the end of each Transformer model initialization, to execute code that needs the model's
modules properly initialized (such as weight initialization).
"""
# Attach the different parallel plans and tied weight keys to the top-most model, so that everything is
# easily available
self._tp_plan, self._ep_plan, self._pp_plan = {}, {}, {}
# Current submodel should register its tied weights
self.all_tied_weights_keys = self.get_expanded_tied_weights_keys(all_submodels=False)
# If current model is a base model, attach `base_model_tp_plan` and `base_model_pp_plan` from config
if self.base_model is self:
self._pp_plan = self.config.base_model_pp_plan.copy() if self.config.base_model_pp_plan is not None else {}
self._tp_plan = self.config.base_model_tp_plan.copy() if self.config.base_model_tp_plan is not None else {}
self._ep_plan = self.config.base_model_ep_plan.copy() if self.config.base_model_ep_plan is not None else {}
for name, module in self.named_children():
# Parallel plans
if plan := getattr(module, "_ep_plan", None):
self._ep_plan.update({f"{name}.{k}": v for k, v in plan.copy().items()})
if plan := getattr(module, "_tp_plan", None):
self._tp_plan.update({f"{name}.{k}": v for k, v in plan.copy().items()})
if plan := getattr(module, "_pp_plan", None):
self._pp_plan.update({f"{name}.{k}": v for k, v in plan.copy().items()})
# Always attach the keys of the children (if the children's config says to NOT tie, then it's empty)
if tied_keys := getattr(module, "all_tied_weights_keys", None):
self.all_tied_weights_keys.update({f"{name}.{k}": f"{name}.{v}" for k, v in tied_keys.copy().items()})
# Maybe initialize the weights and tie the keys
self.init_weights()
self._backward_compatibility_gradient_checkpointing()
@property
def tp_plan(self) -> dict[str, str]:
"""
The full tp plan for the model's modules
"""
if hasattr(self.config, "distributed_config") and self.config.distributed_config.enable_expert_parallel:
return self._ep_plan
return self._tp_plan
@property
def pp_plan(self) -> dict[str, tuple[str, str]]:
return self._pp_plan
@tp_plan.setter
def tp_plan(self, plan: dict[str, str] | None):
if plan is None:
self._tp_plan = {}
return
if not isinstance(plan, dict):
raise ValueError("Can only set a dictionary as `tp_plan`")
# Ensure the styles are all valid
for layer_pattern, parallel_style in plan.items():
if parallel_style not in ALL_PARALLEL_STYLES:
raise ValueError(
f"Unsupported tensor parallel style '{parallel_style}' for layer '{layer_pattern}'. "
f"Supported styles are {list(ALL_PARALLEL_STYLES.keys())}"
)
# Validate that the layer patterns match existing model structure. We check this by getting all parameter
# names and seeing if any match the patterns
model_param_names = [name for name, _ in self.named_parameters()]
for layer_pattern in plan.keys():
# Convert pattern to regex (replace * with .*)
regex_pattern = layer_pattern.replace("*", r"\d+")
pattern_matched = False
for param_name in model_param_names:
if re.match(regex_pattern, param_name):
pattern_matched = True
break
if not pattern_matched:
warnings.warn(
f"Layer pattern '{layer_pattern}' does not match any parameters in the model. This rule may not "
"be applied during tensor parallelization, or may lead to dimension mismatches"
)
# Set the plan
self._tp_plan = plan
@pp_plan.setter
def pp_plan(self, plan: dict[str, tuple[str, str]]):
self._pp_plan = plan
def dequantize(self):
"""
Potentially dequantize the model in case it has been quantized by a quantization method that support
dequantization.
"""
hf_quantizer = getattr(self, "hf_quantizer", None)
if hf_quantizer is None:
raise ValueError("You need to first quantize your model in order to dequantize it")
return hf_quantizer.dequantize(self)
def _backward_compatibility_gradient_checkpointing(self):
if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
self.gradient_checkpointing_enable()
# Remove the attribute now that is has been consumed, so it's no saved in the config.
delattr(self.config, "gradient_checkpointing")
def add_model_tags(self, tags: Union[list[str], str]) -> None:
r"""
Add custom tags into the model that gets pushed to the Hugging Face Hub. Will
not overwrite existing tags in the model.
Args:
tags (`Union[list[str], str]`):
The desired tags to inject in the model
Examples:
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("google-bert/bert-base-cased")
model.add_model_tags(["custom", "custom-bert"])
# Push the model to your namespace with the name "my-custom-bert".
model.push_to_hub("my-custom-bert")
```
"""
if isinstance(tags, str):
tags = [tags]
if self.model_tags is None:
self.model_tags = []
for tag in tags:
if tag not in self.model_tags:
self.model_tags.append(tag)
@classmethod
@restore_default_dtype
def _from_config(cls, config, **kwargs):
"""
All context managers that the model should be initialized under go here.
Args:
dtype (`torch.dtype`, *optional*):
Override the default `dtype` and load the model under this dtype.
"""
# when we init a model from within another model (e.g. VLMs) and dispatch on FA2
# a warning is raised that dtype should be fp16. Since we never pass dtype from within
# modeling code, we can try to infer it here same way as done in `from_pretrained`
# For BC on the old `torch_dtype`
dtype = kwargs.pop("dtype", config.dtype)
if (torch_dtype := kwargs.pop("torch_dtype", None)) is not None:
logger.warning_once("`torch_dtype` is deprecated! Use `dtype` instead!")
# if both kwargs are provided, use `dtype`
dtype = dtype if dtype != config.dtype else torch_dtype
if isinstance(dtype, str):
dtype = getattr(torch, dtype)
# override default dtype if needed
dtype_orig = None
if dtype is not None:
dtype_orig = cls._set_default_dtype(dtype)
# If passing `attn_implementation` as kwargs, respect it (it will be applied recursively on subconfigs)
if "attn_implementation" in kwargs:
config._attn_implementation = kwargs.pop("attn_implementation")
if is_deepspeed_zero3_enabled() and not _is_quantized and not _is_ds_init_called:
logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
# this immediately partitions the model across all gpus, to avoid the overhead in time
# and memory copying it on CPU or each GPU first
import deepspeed
init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]
with ContextManagers(init_contexts):
model = cls(config, **kwargs)
else:
model = cls(config, **kwargs)
# restore default dtype if it was modified
if dtype_orig is not None:
torch.set_default_dtype(dtype_orig)
return model
@classmethod
def _set_default_dtype(cls, dtype: torch.dtype) -> torch.dtype:
"""
Change the default dtype and return the previous one. This is needed when wanting to instantiate the model
under specific dtype.
Args:
dtype (`torch.dtype`):
a floating dtype to set to.
Returns:
`torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was
modified. If it wasn't, returns `None`.
Note `set_default_dtype` currently only works with floating-point types and asserts if for example,
`torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception.
"""
if not dtype.is_floating_point:
raise ValueError(
f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype"
)
logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.")
dtype_orig = torch.get_default_dtype()
torch.set_default_dtype(dtype)
return dtype_orig
@property
def base_model(self) -> nn.Module:
"""
`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
@classmethod
def can_generate(cls) -> bool:
"""
Returns whether this model can generate sequences with `.generate()` from the `GenerationMixin`.
Under the hood, on classes where this function returns True, some generation-specific changes are triggered:
for instance, the model instance will have a populated `generation_config` attribute.
Returns:
`bool`: Whether this model can generate sequences with `.generate()`.
"""
# Directly inherits `GenerationMixin` -> can generate
if "GenerationMixin" in str(cls.__bases__):
return True
# The class inherits from a class that can generate (recursive check) -> can generate
for base in cls.__bases__:
if not hasattr(base, "can_generate"):
continue
if "PreTrainedModel" not in str(base) and base.can_generate():
return True
# Detects whether `prepare_inputs_for_generation` has been overwritten in the model. Prior to v4.45, this
# was how we detected whether a model could generate.
if hasattr(cls, "prepare_inputs_for_generation"): # implicit: doesn't inherit `GenerationMixin`
logger.warning(
f"{cls.__name__} has generative capabilities, as `prepare_inputs_for_generation` is explicitly "
"defined. However, it doesn't directly inherit from `GenerationMixin`. From 👉v4.50👈 onwards, "
"`PreTrainedModel` will NOT inherit from `GenerationMixin`, and this model will lose the ability "
"to call `generate` and other related functions."
"\n - If you're using `trust_remote_code=True`, you can get rid of this warning by loading the "
"model with an auto class. See https://huggingface.co/docs/transformers/en/model_doc/auto#auto-classes"
"\n - If you are the owner of the model architecture code, please modify your model class such that "
"it inherits from `GenerationMixin` (after `PreTrainedModel`, otherwise you'll get an exception)."
"\n - If you are not the owner of the model architecture class, please contact the model code owner "
"to update it."
)
# Otherwise, can't generate
return False
def _flash_attn_2_can_dispatch(self, is_init_check: bool = False) -> bool:
"""
Check the availability of Flash Attention 2 for a given model.
Args:
is_init_check (`bool`, *optional*):
Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are
fully instantiated. This is needed as we also check the devices of the weights, which are only available
later after __init__. This allows to raise proper exceptions early before instantiating the full models
if we know that the model does not support the requested attention.
"""
dtype = self.config.dtype
# check `supports_flash_attn_2` for BC with custom code. TODO: remove after a few releases
if not (self._supports_flash_attn or getattr(self, "_supports_flash_attn_2", False)):
raise ValueError(
f"{self.__class__.__name__} does not support Flash Attention 2.0 yet. Please request to add support where"
f" the model is hosted, on its model hub page: https://huggingface.co/{self.config._name_or_path}/discussions/new"
" or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new"
)
if not is_flash_attn_2_available():
preface = "FlashAttention2 has been toggled on, but it cannot be used due to the following error:"
install_message = "Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2."
# package `flash-attn` can not be installed on Ascend NPU, following validation logics can be ignored.
if is_torch_npu_available():
logger.info("Detect using FlashAttention2 on Ascend NPU.")
return True
if is_torch_xpu_available():
logger.info("Detect using FlashAttention2 (via kernel `kernels-community/flash-attn2`) on XPU.")
return True
if importlib.util.find_spec("flash_attn") is None:
raise ImportError(f"{preface} the package flash_attn seems to be not installed. {install_message}")
else:
# Check FA2 installed version compatibility
flash_attention_version = version.parse(importlib.metadata.version("flash_attn"))
if torch.version.cuda:
if flash_attention_version < version.parse("2.1.0"):
raise ImportError(
f"{preface} you need flash_attn package version to be greater or equal than 2.1.0. Detected version {flash_attention_version}. {install_message}"
)
elif not torch.cuda.is_available():
raise ValueError(
f"{preface} Flash Attention 2 is not available on CPU. Please make sure torch can access a CUDA device."
)
else:
raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")
elif torch.version.hip:
if flash_attention_version < version.parse("2.0.4"):
raise ImportError(
f"{preface} you need flash_attn package version to be greater or equal than 2.0.4. Detected version {flash_attention_version}. {install_message}"
)
else:
raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")
if dtype is None:
logger.warning_once(
"You are attempting to use Flash Attention 2 without specifying a torch dtype. This might lead to unexpected behaviour"
)
elif dtype is not None and dtype not in [torch.float16, torch.bfloat16]:
logger.warning_once(
"Flash Attention 2 only supports torch.float16 and torch.bfloat16 dtypes, but"
f" the current dype in {self.__class__.__name__} is {dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator,"
' or load the model with the `dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", dtype=torch.float16)`'
)
# With the early check, the parameters are not yet initialized correctly
if not is_init_check:
param_devices = list({param.device for param in self.parameters()})
if len(param_devices) == 1 and param_devices[0].type == "cpu":
if torch.cuda.is_available():
logger.warning_once(
"You are attempting to use Flash Attention 2 with a model not initialized on GPU. Make sure to move the model to GPU"
" after initializing it on CPU with `model.to('cuda')`."
)
elif is_torch_mlu_available():
logger.warning_once(
"You are attempting to use Flash Attention 2 with a model not initialized on MLU. Make sure to move the model to MLU"
" after initializing it on CPU with `model.to('mlu')`."
)
else:
raise ValueError(
"You are attempting to use Flash Attention 2 with a model not initialized on GPU and with no GPU available. "
"This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map "
"or initialising the model on CPU and then moving it to GPU."
)
# If no error raise by this point, we can return `True`
return True
def _flash_attn_3_can_dispatch(self, is_init_check: bool = False) -> bool:
"""
Check the availability of Flash Attention 3 for a given model.
Args:
is_init_check (`bool`, *optional*):
Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are
fully instantiated. This is needed as we also check the devices of the weights, which are only available
later after __init__. This allows to raise proper exceptions early before instantiating the full models
if we know that the model does not support the requested attention.
"""
dtype = self.config.dtype
if not self._supports_flash_attn:
raise ValueError(
f"{self.__class__.__name__} does not support Flash Attention 3 yet. Please request to add support where"
f" the model is hosted, on its model hub page: https://huggingface.co/{self.config._name_or_path}/discussions/new"
" or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new"
)
if not is_flash_attn_3_available():
preface = "FlashAttention3 has been toggled on, but it cannot be used due to the following error:"
if importlib.util.find_spec("flash_attn_3") is None:
raise ImportError(f"{preface} the package flash_attn_3 seems to be not installed.")
if torch.cuda.is_available():
major, _ = torch.cuda.get_device_capability()
if major < 9:
raise ValueError(
f"{preface} Flash Attention 3 requires compute capability >= 9.0, but found {torch.cuda.get_device_capability()} with compute capability {major}.0."
)
else:
raise ImportError(f"{preface} Flash Attention 3 is not available.")
else:
raise ValueError(
f"{preface} Flash Attention 3 is not available on CPU. Please make sure torch can access a CUDA device."
)
if dtype is None:
logger.warning_once(
"You are attempting to use Flash Attention 3 without specifying a torch dtype. This might lead to unexpected behaviour"
)
elif dtype is not None and dtype not in [torch.float16, torch.bfloat16]:
logger.warning_once(
"Flash Attention 3 only supports torch.float16 and torch.bfloat16 dtypes, but"
f" the current dype in {self.__class__.__name__} is {dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` decorator,"
' or load the model with the `dtype` argument. Example: `model = AutoModel.from_pretrained("meta-llama/Llama-3.2-1B", attn_implementation="flash_attention_3", dtype=torch.float16)`'
)
if getattr(self.config, "alibi", False) or getattr(self.config, "use_alibi", False):
raise ValueError("Model is configured to use ALiBi, which is not supported by Flash Attention 3.")
# Check for attention dropout, which is incompatible with FA3
if hasattr(self.config, "attention_dropout") and self.config.attention_dropout > 0:
raise ValueError(
f"Model has attention_dropout={self.config.attention_dropout}, which is not supported by Flash Attention 3."
)
# With the early check, the parameters are not yet initialized correctly
if not is_init_check:
param_devices = list({param.device for param in self.parameters()})
if len(param_devices) == 1 and param_devices[0].type == "cpu":
if torch.cuda.is_available():
logger.warning_once(
"You are attempting to use Flash Attention 3 with a model not initialized on GPU. Make sure to move the model to GPU"
" after initializing it on CPU with `model.to('cuda')`."
)
else:
raise ValueError(
"You are attempting to use Flash Attention 3 with a model not initialized on GPU and with no GPU available. "
"This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map "
"or initialising the model on CPU and then moving it to GPU."
)
return True
def _sdpa_can_dispatch(self, is_init_check: bool = False) -> bool:
"""
Check the availability of SDPA for a given model.
Args:
is_init_check (`bool`, *optional*):
Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are
fully instantiated. This is needed as we also check the devices of the weights, which are only available
later after __init__. This allows to raise proper exceptions early before instantiating the full models
if we know that the model does not support the requested attention.
"""
if not self._supports_sdpa:
raise ValueError(
f"{self.__class__.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
" Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe"
' this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`'
)
if (
torch.version.hip is not None
and torch.cuda.device_count() > 1
and version.parse(torch.__version__) < version.parse("2.4.1")
):
logger.warning_once(
"Using the `SDPA` attention implementation on multi-gpu setup with ROCM may lead to performance issues due to the FA backend. Disabling it to use alternative backends."
)
torch.backends.cuda.enable_flash_sdp(False)
return True
def _flex_attn_can_dispatch(self, is_init_check: bool = False) -> bool:
"""
Check the availability of Flex Attention for a given model.
Args:
is_init_check (`bool`, *optional*):
Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are
fully instantiated. This is needed as we also check the devices of the weights, which are only available
later after __init__. This allows to raise proper exceptions early before instantiating the full models
if we know that the model does not support the requested attention.
"""
if not self._supports_flex_attn:
raise ValueError(
f"{self.__class__.__name__} does not support an attention implementation through torch's flex_attention."
" Please request the support for this architecture: https://github.com/huggingface/transformers/issues/34809."
" If you believe this error is a bug, please open an issue in Transformers GitHub repository"
' and load your model with the argument `attn_implementation="eager"` meanwhile.'
' Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`'
)
if not is_torch_flex_attn_available():
raise ImportError(
"PyTorch Flex Attention requirements in Transformers are not met. Please install torch>=2.5.0."
)
# If no error raise by this point, we can return `True`
return True
def _check_and_adjust_attn_implementation(
self, attn_implementation: Optional[str], is_init_check: bool = False
) -> str:
"""
Check that the `attn_implementation` exists and is supported by the models, and try to get the kernel from hub if
it matches hf kernels pattern.
Args:
attn_implementation (`str` or `None`):
The attention implementation to check for existence/validity.
is_init_check (`bool`, *optional*):
Whether this check is performed early, i.e. at __init__ time, or later when the model and its weights are
fully instantiated. This is needed as we also check the devices of the weights, which are only available
later after __init__. This allows to raise proper exceptions early before instantiating the full models
if we know that the model does not support the requested attention.
Returns:
`str`: The final attention implementation to use, including potential fallbacks from sdpa to eager, or from
None to sdpa (to potentially eager).
"""
applicable_attn_implementation = attn_implementation
# If FA not installed, do not fail but use kernels instead
requested_original_flash_attn = attn_implementation is not None and (
attn_implementation == "flash_attention_2" or attn_implementation == "flash_attention_3"
)
if (
requested_original_flash_attn
and self._supports_flash_attn
and not (is_flash_attn_2_available() or is_flash_attn_3_available())
and is_kernels_available()
and not is_torch_npu_available()
):
if attn_implementation.endswith("2"):
applicable_attn_implementation = "kernels-community/flash-attn2"
if is_torch_xpu_available():
# On XPU, kernels library is the native implementation
# Disabling this flag to avoid giving wrong fallbacks on errors and warnings
requested_original_flash_attn = False
else:
applicable_attn_implementation = "kernels-community/vllm-flash-attn3"
if is_kernel(applicable_attn_implementation):
try:
# preload flash attention here to allow compile with fullgraph
lazy_import_flash_attention(applicable_attn_implementation)
# log that we used kernel fallback if successful
if requested_original_flash_attn:
logger.warning_once(
f"You do not have `flash_attn` installed, using `{applicable_attn_implementation}` "
"from the `kernels` library instead!"
)
except Exception as e:
# raise the proper exception for requested flash attention
if requested_original_flash_attn:
if attn_implementation.endswith("2"):
self._flash_attn_2_can_dispatch()
else:
self._flash_attn_3_can_dispatch()
# error properly out if a kernel was specifically requested
raise e
else:
applicable_attn_implementation = self.get_correct_attn_implementation(
applicable_attn_implementation, is_init_check
)
# preload flash attention here to allow compile with fullgraph
if "flash" in applicable_attn_implementation:
lazy_import_flash_attention(applicable_attn_implementation)
return applicable_attn_implementation
def get_correct_attn_implementation(self, requested_attention: Optional[str], is_init_check: bool = False) -> str:
    """
    Validate `requested_attention` against the implementations registered/supported for this model and run
    the corresponding dispatch checks.

    Args:
        requested_attention (`str`, *optional*):
            The requested attention implementation. `None` is treated as an implicit "sdpa".
        is_init_check (`bool`, defaults to `False`):
            Forwarded to the per-implementation `_can_dispatch` checks (they may relax some verifications
            at init time).

    Returns:
        `str`: The validated implementation name. May fall back from the implicit "sdpa" default to
        "eager" when sdpa is not usable; an *explicitly* requested "sdpa" is never silently downgraded.

    Raises:
        ValueError: if the requested implementation is not among the registered ones, or if a dispatch
        check fails.
    """
    applicable_attention = "sdpa" if requested_attention is None else requested_attention
    if applicable_attention not in ["eager"] + ALL_ATTENTION_FUNCTIONS.valid_keys():
        # Build an error message listing only the alternatives this particular model supports
        message = (
            f'Specified `attn_implementation="{applicable_attention}"` is not supported. The only possible arguments are '
            '`attn_implementation="eager"`, `"paged|eager"`'
        )
        # check `supports_flash_attn_2` for BC with custom code. TODO: remove after a few releases
        if self._supports_flash_attn or getattr(self, "_supports_flash_attn_2", False):
            message += ', `"attn_implementation=flash_attention_3"`, `"attn_implementation=flash_attention_2"`, `"attn_implementation=paged|flash_attention_2"`'
        if self._supports_sdpa:
            message += ', `"attn_implementation=sdpa"`, `"attn_implementation=paged|sdpa"`'
        if self._supports_flex_attn:
            message += ', `"attn_implementation=flex_attention"`'
        raise ValueError(message + ".")

    # Perform relevant checks (each `_can_dispatch` call raises when the implementation cannot be used)
    if "flash_attention_2" in applicable_attention:
        self._flash_attn_2_can_dispatch(is_init_check)
    elif "flash_attention_3" in applicable_attention:
        self._flash_attn_3_can_dispatch(is_init_check)
    elif "flex_attention" in applicable_attention:
        self._flex_attn_can_dispatch(is_init_check)
    elif "sdpa" in applicable_attention:
        # Sdpa is the default, so we try it and fallback to eager otherwise when not possible
        try:
            self._sdpa_can_dispatch(is_init_check)
        except (ValueError, ImportError) as e:
            # Only swallow the failure when sdpa was the implicit default, never when explicitly asked for
            if requested_attention is not None and "sdpa" in requested_attention:
                raise e
            applicable_attention = "eager"
    return applicable_attention
@classmethod
def _can_set_attn_implementation(cls) -> bool:
    """Detect whether the class supports setting its attention implementation dynamically. It is an ugly check based on
    opening the file, but avoids maintaining yet another property flag.

    Returns:
        `bool`: `True` when the modeling file follows the AttentionInterface pattern (or defines no
        attention layer at all, e.g. a wrapper/multimodal model), `False` otherwise.
    """
    class_file = sys.modules[cls.__module__].__file__
    # Source files are utf-8; be explicit so reading does not depend on the platform locale
    with open(class_file, "r", encoding="utf-8") as f:
        code = f.read()
    # heuristic -> if we find those patterns, the model uses the correct interface
    # (dot in `nn\.Module` is escaped so it cannot match arbitrary characters, e.g. `nnXModule`)
    if re.search(r"class \w+Attention\(nn\.Module\)", code):
        return (
            "eager_attention_forward" in code
            and "ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]" in code
        )
    else:
        # If no attention layer, assume `True`. Most probably a multimodal model or inherits from existing models
        return True
def set_attn_implementation(self, attn_implementation: Union[str, dict]):
    """
    Set the requested `attn_implementation` for this model.

    Args:
        attn_implementation (`str` or `dict`):
            The attention implementation to set for this model. It can be either a `str`, in which case it will be
            dispatched to all submodels if relevant, or a `dict` where keys are the sub_configs name, in which case each
            submodel will dispatch the corresponding value. In the `dict` form, the empty-string key (if present)
            targets the top-level model itself.
    """
    requested_implementation = (
        attn_implementation
        if not isinstance(attn_implementation, dict)
        else attn_implementation.get("", self.config._attn_implementation)
    )

    if requested_implementation != self.config._attn_implementation:
        # In this case, warn and keep the current implementation (no hard failure)
        if not self._can_set_attn_implementation():
            logger.warning(
                f"{self.__class__.__name__} does not support setting its attention implementation dynamically, because it "
                "does not follow the functional approach based on AttentionInterface "
                "(see https://huggingface.co/docs/transformers/en/attention_interface)"
            )
        else:
            requested_implementation = self._check_and_adjust_attn_implementation(
                requested_implementation, is_init_check=False
            )
            # Apply the change (on the internal attr, to avoid setting it recursively)
            self.config._attn_implementation_internal = requested_implementation

    # Apply it to all submodels as well
    for submodule in self.modules():
        # We found a submodel (which is not self) with a different config (otherwise, it may be the same "actual model",
        # e.g. ForCausalLM has a Model inside, but no need to check it again)
        if (
            submodule is not self
            and isinstance(submodule, PreTrainedModel)
            and submodule.config.__class__ != self.config.__class__
            # If it was already changed, no need to do it again
            and not hasattr(submodule.config, "_attn_was_changed")
        ):
            # In this case, warn and skip
            if not submodule._can_set_attn_implementation():
                logger.warning(
                    f"{submodule.__class__.__name__} does not support setting its attention implementation dynamically, because it "
                    "does not follow the functional approach based on AttentionInterface "
                    "(see https://huggingface.co/docs/transformers/en/attention_interface)"
                )
            # Set the attn on the submodule
            else:
                sub_implementation = requested_implementation
                if isinstance(attn_implementation, dict):
                    for subconfig_key in self.config.sub_configs:
                        # We need to check for exact object match here, with `is`
                        if getattr(self.config, subconfig_key) is submodule.config:
                            sub_implementation = attn_implementation.get(
                                subconfig_key, submodule.config._attn_implementation
                            )
                            break
                # Check the module can use correctly, otherwise we raise an error if requested attention can't be set for submodule
                sub_implementation = submodule.get_correct_attn_implementation(sub_implementation)
                submodule.config._attn_implementation_internal = sub_implementation
            # Still add it as "changed" even if it was skipped, as we would otherwise try to set it in the dark afterwards
            # We need to set it on the config itself, to differentiate 2 subconfigs of the same __class__ potentially
            submodule.config._attn_was_changed = True

    # We need this as some old and badly designed models use subconfigs without declaring the corresponding modules as PreTrainedModel
    for subconfig_key in self.config.sub_configs:
        if (subconfig := getattr(self.config, subconfig_key)) is not None:
            sub_implementation = (
                requested_implementation
                if not isinstance(attn_implementation, dict)
                else attn_implementation.get(subconfig_key, subconfig._attn_implementation)
            )
            # This means we did not perform any check above for this particular subconfig -> set it in the dark if it is registered
            if (
                not hasattr(subconfig, "_attn_was_changed")
                # If it's already the same, then no need to enter here and raise warnings
                and sub_implementation != subconfig._attn_implementation
            ):
                if sub_implementation not in ["eager"] + ALL_ATTENTION_FUNCTIONS.valid_keys():
                    raise ValueError(
                        f'Specified `attn_implementation="{sub_implementation}"` is not supported for {subconfig_key}. '
                        'The only possible arguments are "eager" (manual attention implementation)'
                        f"or one of the following: {list(ALL_ATTENTION_FUNCTIONS.valid_keys())}"
                    )
                subconfig._attn_implementation_internal = sub_implementation
                logger.warning(
                    f"We set the attention implementation for the sub-config `{subconfig_key}` to `{sub_implementation}` "
                    "without finding the associated sub-model. For this reason we could not check if the model supports it. "
                    "You may encounter undefined behavior."
                )
            # Unset the attribute in this case, to avoid issues in the future
            else:
                if hasattr(subconfig, "_attn_was_changed"):
                    del subconfig._attn_was_changed
def enable_input_require_grads(self):
    """
    Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
    the model weights fixed.
    """

    def _require_grads_hook_fn(module, inputs, output):
        # Mark the embedding output so gradients can flow back through frozen embeddings
        output.requires_grad_(True)

    input_embeddings = self.get_input_embeddings()
    self._require_grads_hook = input_embeddings.register_forward_hook(_require_grads_hook_fn)
def disable_input_require_grads(self):
    """
    Removes the `_require_grads_hook` registered by `enable_input_require_grads`.
    """
    hook_handle = self._require_grads_hook
    hook_handle.remove()
def get_encoder(self, modality: Optional[str] = None):
    """
    Best-effort lookup of the *encoder* module. If provided with `modality` argument,
    it looks for a modality-specific encoder in multimodal models (e.g. "image_encoder")
    By default the function returns model's text encoder if any, and otherwise returns `self`.
    Possible `modality` values are "image", "video" and "audio".
    """
    # NOTE: new models need to use existing names for layers if possible, so this list doesn't grow infinitely
    if modality in ("image", "video"):
        candidate_names = ["vision_tower", "visual", "vision_model", "vision_encoder", "image_tower"]
    elif modality == "audio":
        candidate_names = ["audio_tower", "audio_encoder", "speech_encoder"]
    elif modality is None:
        candidate_names = ["text_encoder", "encoder"]
    else:
        raise ValueError(f'Unnrecognized modality, has to be "image", "video" or "audio" but found {modality}')

    # A sentinel keeps the original `hasattr` semantics: an attribute explicitly set to None is still returned
    _missing = object()
    for candidate in candidate_names:
        found = getattr(self, candidate, _missing)
        if found is not _missing:
            return found

    if self.base_model is not self and hasattr(self.base_model, "get_encoder"):
        base_encoder = self.base_model.get_encoder(modality=modality)
        # Base model will always have attr `get_encoder` if inherited from `PreTrainedModel`
        # But it doesn't mean that the model has an encoder module, and we need to return `self`
        if base_encoder != self.base_model:
            return base_encoder
    # If this is a base transformer model (no encoder/model attributes), return self
    return self
def set_encoder(self, encoder, modality: Optional[str] = None):
    """
    Symmetric setter. Mirrors the lookup logic used in `get_encoder`.

    Args:
        encoder: The module to install as the encoder.
        modality (`str`, *optional*):
            One of "image", "video", "audio", or `None` for the text encoder.

    Raises:
        ValueError: if `modality` is not one of the recognized values.
    """
    # NOTE: new models need to use existing names for layers if possible, so this list doesn't grow infinitely
    if modality in ["image", "video"]:
        possible_module_names = ["vision_tower", "visual", "vision_model", "vision_encoder", "image_tower"]
    # Bug fix: this branch was a plain `if`, so "image"/"video" fell through to the final `else` and
    # always raised. `"speech_encoder"` added so the list actually mirrors `get_encoder`.
    elif modality == "audio":
        possible_module_names = ["audio_tower", "audio_encoder", "speech_encoder"]
    elif modality is None:
        possible_module_names = ["text_encoder", "encoder"]
    else:
        raise ValueError(f'Unnrecognized modality, has to be "image", "video" or "audio" but found {modality}')
    for name in possible_module_names:
        if hasattr(self, name):
            setattr(self, name, encoder)
            return
    if self.base_model is not self:
        if hasattr(self.base_model, "set_encoder"):
            self.base_model.set_encoder(encoder, modality=modality)
        else:
            self.model = encoder
def get_decoder(self):
    """
    Best-effort lookup of the *decoder* module.

    Lookup order:
        1. `self.language_model` / `self.text_model` / `self.decoder` / `self.text_decoder`
        2. delegate to `self.base_model.get_decoder()` when a distinct base model exists
        3. fallback: return `self` (e.g. a base model such as MistralModel that is itself the decoder)
    """
    for attr_name in ("language_model", "text_model", "decoder", "text_decoder"):
        if hasattr(self, attr_name):
            return getattr(self, attr_name)

    base = self.base_model
    if base is not self and hasattr(base, "get_decoder"):
        return base.get_decoder()

    # If this is a base transformer model (no decoder/model attributes), return self
    return self
def set_decoder(self, decoder):
    """
    Symmetric setter. Mirrors the lookup logic used in `get_decoder`.

    Args:
        decoder: The module to install as the decoder.
    """
    # "text_decoder" added so this list actually mirrors `get_decoder`; a stray debug
    # `print(name)` that polluted stdout on every call was removed.
    possible_module_names = ["language_model", "text_model", "decoder", "text_decoder"]
    for name in possible_module_names:
        if hasattr(self, name):
            setattr(self, name, decoder)
            return
    if self.base_model is not self:
        if hasattr(self.base_model, "set_decoder"):
            self.base_model.set_decoder(decoder)
        else:
            self.model = decoder
@torch.no_grad()
def _init_weights(self, module):
    """
    Initialize the weights. This is quite general on purpose, in the spirit of what we usually do. For more complex
    initialization scheme, it should be overridden by the derived `PreTrainedModel` class. In case a model adds an explicit
    `nn.Parameter`, this method should also be overridden in order to initialize it correctly.

    Args:
        module: The (leaf) module whose parameters are initialized in-place.
    """
    # Resolve the init std from the config, checking attributes in decreasing order of specificity
    if hasattr(self.config, "initializer_range"):
        std = self.config.initializer_range or 0.02
    elif hasattr(self.config, "init_std"):
        std = self.config.init_std
    elif hasattr(self.config, "initializer_factor"):
        std = self.config.initializer_factor
    else:
        # 0.02 is the standard default value across the library
        std = getattr(self.config.get_text_config(), "initializer_range", 0.02)

    if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d, nn.ConvTranspose2d)):
        # `weight`/`bias` can be None (e.g. `bias=False` layers), hence the getattr guards
        if getattr(module, "weight", None) is not None:
            init.normal_(module.weight, mean=0.0, std=std)
        if getattr(module, "bias", None) is not None:
            init.zeros_(module.bias)
    elif isinstance(module, nn.Embedding):
        if getattr(module, "weight", None) is not None:
            init.normal_(module.weight, mean=0.0, std=std)
        # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
        if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
            init.zeros_(module.weight[module.padding_idx])
    elif isinstance(module, nn.MultiheadAttention):
        # This uses torch's original init
        module._reset_parameters()
    # We cannot use `isinstance` on the RMSNorms or LayerNorms, as they usually are custom modules which change names
    # between modelings (because they are prefixed with the model name)
    elif (
        isinstance(module, (nn.GroupNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d))
        or "LayerNorm" in module.__class__.__name__
        or "RMSNorm" in module.__class__.__name__
    ):
        # Norms can exist without weights (in which case they are None from torch primitives)
        if hasattr(module, "weight") and module.weight is not None:
            init.ones_(module.weight)
        if hasattr(module, "bias") and module.bias is not None:
            init.zeros_(module.bias)
def _initialize_weights(self, module):
    """
    Initialize the weights of `module` exactly once: a module already flagged as initialized is left
    untouched, otherwise it is initialized and flagged.
    """
    already_initialized = getattr(module, "_is_hf_initialized", False)
    if not already_initialized:
        self._init_weights(module)
        module._is_hf_initialized = True
@torch.no_grad()
@init.guard_torch_init_functions()
def initialize_weights(self):
    """
    This is equivalent to calling `self.apply(self._initialize_weights)`, but correctly handles composite models.
    This function dynamically dispatches the correct `init_weights` function to the modules as we advance in the
    module graph along the recursion. It can handle an arbitrary number of sub-models. Without it, every composite
    model would have to recurse a second time on all sub-models explicitly in the outer-most `_init_weights`, which
    is extremely error prone and inefficient.

    NOTE(review): as a side effect of the first call, `torch.nn.Module` is patched process-wide with a
    `smart_apply` method (done once, guarded by the `hasattr` check below).
    """
    if not hasattr(torch.nn.Module, "smart_apply"):
        # This function is equivalent to `torch.nn.Module.apply`, except that it dynamically adjust the function
        # to apply as we go down the graph
        def smart_apply(self, fn):
            for module in self.children():
                # We found a sub-model: recursively dispatch its own init function now!
                if isinstance(module, PreTrainedModel):
                    module.smart_apply(module._initialize_weights)
                else:
                    module.smart_apply(fn)
            fn(self)
            return self

        torch.nn.Module.smart_apply = smart_apply

    # Let the magic happen with this simple call
    self.smart_apply(self._initialize_weights)
def get_expanded_tied_weights_keys(self, all_submodels: bool = False) -> dict:
    r"""
    Return the expanded tied weight keys (in case they contain modules or regex patterns) for only the current
    model, or recursively for all submodels if `all_submodels=True` (i.e. it will re-check the config values for all
    submodels).

    For almost all models, we only require to tie the embeddings, so the model has an internal property
    `_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}`. In this case, the mapping is already
    "expanded", i.e. it already contains full parameters, and this function will simply return a copy of the property.
    For more complex patterns, e.g. for `DFineForObjectDetection`, we have the following attribute

    ```
    _tied_weights_keys = {
        r"bbox_embed.(?![0])\d+": "bbox_embed.0",
        r"class_embed.(?![0])\d+": "class_embed.0",
        "model.decoder.class_embed": "class_embed",
        "model.decoder.bbox_embed": "bbox_embed",
    }
    ```

    In this case, the function looks up all the model's parameters and buffers, and matches all the params,
    returning the mapping of every concrete target parameter to its concrete source parameter (e.g.
    `'bbox_embed.1.layers.0.bias': 'bbox_embed.0.layers.0.bias'`, and so on for every regex/module match).

    Returns:
        `dict`: `{target_param_name: source_param_name}` with only fully-expanded parameter names.

    Raises:
        ValueError: if a `_tied_weights_keys` entry matches no parameters, or the target count is not a
        multiple of the source count.
    """
    if all_submodels:
        expanded_tied_weights = {}
        for prefix, submodule in self.named_modules(remove_duplicate=False):
            if isinstance(submodule, PreTrainedModel):
                # Will dynamically check the config if it has changed
                submodel_tied_weights = submodule.get_expanded_tied_weights_keys(all_submodels=False)
                if prefix != "":
                    submodel_tied_weights = {
                        f"{prefix}.{k}": f"{prefix}.{v}" for k, v in submodel_tied_weights.items()
                    }
                expanded_tied_weights.update(submodel_tied_weights)
        return expanded_tied_weights

    tied_mapping = self._tied_weights_keys
    # If the config does not specify any tying, return empty dict
    if not self.config.tie_word_embeddings and not self.config.tie_encoder_decoder:
        return {}
    # If None, return empty dict
    elif tied_mapping is None:
        return {}

    # Short-cut for the most common cases: if the tied weights mapping only contains already expanded params,
    # return it directly (the regex matches names containing only letters, numbers, dots, and underscores to make
    # sure it does not contain a regex pattern, and finishing by "bias" or "weight" to make sure it's not a module)
    # Bug fix: the alternation is grouped as `(weight|bias)$` — previously written as
    # `...(weight)|(bias)$`, which split the whole pattern at `|`, so plain `*.bias` keys never
    # matched (forcing the slow expansion path) and `weight`-prefixed names matched without an end anchor.
    common_case_regex = re.compile(r"^[A-Za-z0-9_\.]+(weight|bias)$")
    if all(common_case_regex.match(k) for k in tied_mapping.keys() | tied_mapping.values()):
        return tied_mapping.copy()

    # We need to expand the regex patterns or the modules into proper parameters
    expanded_tied_weights = {}
    all_param_names = {k for k, _ in self.named_parameters(remove_duplicate=False)} | {
        k for k, _ in self.named_buffers(remove_duplicate=False)
    }
    for target_name, source_name in tied_mapping.items():
        # Anchor at the start so e.g. "decoder" does not match "model.decoder"
        target_name = "^" + target_name
        source_name = "^" + source_name
        source_params = sorted(filter(lambda x: re.search(source_name, x), all_param_names))
        target_params = sorted(filter(lambda x: re.search(target_name, x), all_param_names))
        if (
            not len(source_params) > 0
            or not len(target_params) > 0
            or len(target_params) % len(source_params) != 0
        ):
            raise ValueError(
                f"There is an issue with your definition of `tie_weights_keys` for {source_name}:{target_name}. "
                f"We found {source_params} to tie into {target_params}"
            )
        # we cycle source as it should be dispatch in many target if regex
        for target_n, source_n in zip(target_params, cycle(source_params)):
            # If the source is already registed as a target, use the original corresponding source. This should never
            # happen in general, but some models such as `d_fine` have complicated regex patterns, so it end up being
            # the case for simplicity of the regexes. Fix it silently here
            if source_n in expanded_tied_weights.keys():
                # Use original source instead of having keys both as source and targets
                expanded_tied_weights[target_n] = expanded_tied_weights[source_n]
            # Usual case, everything is already correct
            else:
                expanded_tied_weights[target_n] = source_n
    return expanded_tied_weights
def tie_weights(self, missing_keys: Optional[set[str]] = None, recompute_mapping: bool = True):
    """
    Tie the model weights. If `recompute_mapping=False` (default when called internally), it will rely on the
    `model.all_tied_weights_keys` attribute, containing the `{target: source}` mapping for the tied params.
    If `recompute_mapping=True`, it will re-check all internal submodels and their config to determine the params
    that need to be tied. This is the default when `model.tie_weights()` is called on its own, outside of
    `__init__`, and `from_pretrained`, in case the config values were changed somewhere.

    Args:
        missing_keys (`set[str]`, *optional*):
            When provided (during checkpoint loading), successfully tied targets are removed from this set
            in-place; a checkpoint providing the target but not its source raises.
        recompute_mapping (`bool`, defaults to `True`):
            Whether to re-expand the tied-weights mapping from the configs instead of using the cached one.
    """
    # In this case, the keys stored in `all_tied_weights_keys` are already correct
    if not recompute_mapping:
        tied_keys = self.all_tied_weights_keys
    else:
        tied_keys = self.get_expanded_tied_weights_keys(all_submodels=True)

    for target_param_name, source_param_name in tied_keys.items():
        source_param = self.get_parameter_or_buffer(source_param_name)
        # Resolve the module owning the target parameter, then alias the source param onto it
        if "." in target_param_name:
            parent_name, name = target_param_name.rsplit(".", 1)
            parent = self.get_submodule(parent_name)
        else:
            name = target_param_name
            parent = self
        setattr(parent, name, source_param)
        self._adjust_bias(parent, source_param)

        if missing_keys is not None:
            source_is_there = source_param_name not in missing_keys
            target_is_there = target_param_name not in missing_keys
            # If we tied correctly, remove the target from the missing keys
            if source_is_there:
                missing_keys.discard(target_param_name)
            # If the source is not present, but the target is, the checkpoint is corrupted
            # TODO: maybe we could simply tie in the opposite direction here instead of error?
            elif target_is_there:
                raise ValueError(
                    f"This checkpoint seem corrupted. The tied weights mapping for this model specifies to tie "
                    f"{source_param_name} (which should be present and is not), to {target_param_name} (which is "
                    f"present)."
                )
def _adjust_bias(self, output_embeddings, input_embeddings):
    """
    After tying/resizing embeddings, grow the output layer's bias (zero-padded on the right) to match
    its weight's row count, and sync `out_features` with the input embedding's vocabulary size.
    """
    bias = getattr(output_embeddings, "bias", None)
    if bias is not None and hasattr(output_embeddings, "weight"):
        num_rows = output_embeddings.weight.shape[0]
        missing = num_rows - bias.shape[0]
        # Zero-pad so the bias covers every output row
        bias.data = nn.functional.pad(bias.data, (0, missing), "constant", 0)
    if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
        output_embeddings.out_features = input_embeddings.num_embeddings
def _get_no_split_modules(self, device_map: str):
    """
    Get the modules of the model that should not be split when using device_map. We iterate through the
    modules to collect the underlying `_no_split_modules`.

    Args:
        device_map (`str`):
            The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"]

    Returns:
        `list[str]`: List of modules that should not be split
    """
    no_split_modules = set()
    pending = [self]
    while pending:
        current = pending.pop()
        # A class already collected needs no further inspection — its children are skipped as well
        if current.__class__.__name__ in no_split_modules:
            continue
        if isinstance(current, PreTrainedModel):
            if current._no_split_modules is None:
                raise ValueError(
                    f"{current.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model "
                    "class needs to implement the `_no_split_modules` attribute."
                )
            no_split_modules.update(current._no_split_modules)
        pending.extend(current.children())
    return list(no_split_modules)
def resize_token_embeddings(
    self,
    new_num_tokens: Optional[int] = None,
    pad_to_multiple_of: Optional[int] = None,
    mean_resizing: bool = True,
) -> nn.Embedding:
    """
    Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.

    Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

    Arguments:
        new_num_tokens (`int`, *optional*):
            The new number of tokens in the embedding matrix. Increasing the size will add newly initialized
            vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
            returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the embedding matrix to a multiple of the provided value.If `new_num_tokens` is set to
            `None` will just pad the embedding to a multiple of `pad_to_multiple_of`.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
            `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
            details about this, or help on choosing the correct value for resizing, refer to this guide:
            https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
        mean_resizing (`bool`):
            Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
            covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`.

            Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
            where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the
            old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
            Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

    Return:
        `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
    """
    model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
    # No target size given: the call above was a no-op resize, just return the module
    if new_num_tokens is None and pad_to_multiple_of is None:
        return model_embeds

    # Since we are basically reusing the same old embeddings with new weight values, gathering is required
    is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
    if is_deepspeed_zero3_enabled() and not is_quantized:
        import deepspeed

        # Under ZeRO-3 the weight is sharded; gather it to read the true vocab size
        with deepspeed.zero.GatheredParameters(model_embeds.weight, modifier_rank=None):
            vocab_size = model_embeds.weight.shape[0]
    else:
        vocab_size = model_embeds.weight.shape[0]

    # Update base model and current model config.
    self.config.get_text_config().vocab_size = vocab_size
    self.vocab_size = vocab_size

    # Tie weights again if needed
    self.tie_weights()

    return model_embeds
def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True):
    """
    Resize the input embeddings in-place on the model and, when the output embeddings are not tied to
    them, resize the lm head as well. Returns the new input embedding module. See
    `resize_token_embeddings` for the argument semantics.
    """
    old_embeddings = self.get_input_embeddings()
    new_embeddings = self._get_resized_embeddings(
        old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing
    )
    # Preserve any accelerate hook attached to the old module
    if hasattr(old_embeddings, "_hf_hook"):
        hook = old_embeddings._hf_hook
        add_hook_to_module(new_embeddings, hook)
    old_embeddings_requires_grad = old_embeddings.weight.requires_grad
    new_embeddings.requires_grad_(old_embeddings_requires_grad)
    self.set_input_embeddings(new_embeddings)
    is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None

    # Update new_num_tokens with the actual size of new_embeddings
    if pad_to_multiple_of is not None:
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            # Under ZeRO-3 the weight is sharded; gather it to read the true shape
            with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None):
                new_num_tokens = new_embeddings.weight.shape[0]
        else:
            new_num_tokens = new_embeddings.weight.shape[0]

    # if word embeddings are not tied, make sure that lm head is resized as well
    if (
        self.get_output_embeddings() is not None
        and not self.config.get_text_config(decoder=True).tie_word_embeddings
    ):
        old_lm_head = self.get_output_embeddings()
        # Some models use an `nn.Embedding` as the output projection, others a linear lm head
        if isinstance(old_lm_head, torch.nn.Embedding):
            new_lm_head = self._get_resized_embeddings(old_lm_head, new_num_tokens, mean_resizing=mean_resizing)
        else:
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens, mean_resizing=mean_resizing)
        if hasattr(old_lm_head, "_hf_hook"):
            hook = old_lm_head._hf_hook
            add_hook_to_module(new_lm_head, hook)
        old_lm_head_requires_grad = old_lm_head.weight.requires_grad
        new_lm_head.requires_grad_(old_lm_head_requires_grad)
        self.set_output_embeddings(new_lm_head)

    return self.get_input_embeddings()
def _get_resized_embeddings(
self,
old_embeddings: nn.Embedding,
new_num_tokens: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
mean_resizing: bool = True,
) -> nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (`int`, *optional*):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
`torch.nn.Embedding` module of the model without doing anything.
pad_to_multiple_of (`int`, *optional*):
If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to
`None` will just pad the embedding to a multiple of `pad_to_multiple_of`.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
details about this, or help on choosing the correct value for resizing, refer to this guide:
https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
mean_resizing (`bool`):
Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`.
Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the
old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
Return:
`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
`new_num_tokens` is `None`
"""
if pad_to_multiple_of is not None:
if not isinstance(pad_to_multiple_of, int):
raise ValueError(
f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not and integer. Please make sure to pass an integer"
)
if new_num_tokens is None:
new_num_tokens = old_embeddings.weight.shape[0]
new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of
else:
logger.info(
"You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding"
f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available."
" For more details about this, or help on choosing the correct value for resizing, refer to this guide:"
" https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc"
)
if new_num_tokens is None:
return old_embeddings
is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
if is_deepspeed_zero3_enabled() and not is_quantized:
import deepspeed
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
else:
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
return old_embeddings
if not isinstance(old_embeddings, nn.Embedding):
raise TypeError(
f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You"
" should either use a different resize function or make sure that `old_embeddings` are an instance of"
f" {nn.Embedding}."
)
# Build new embeddings
# When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
# because the shape of the new embedding layer is used across various modeling files
# as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
# to errors when training.
new_embeddings = nn.Embedding(
new_num_tokens,
old_embedding_dim,
device=old_embeddings.weight.device,
dtype=old_embeddings.weight.dtype,
)
if new_num_tokens > old_num_tokens and not mean_resizing:
# initialize new embeddings (in particular added tokens) with a mean of 0 and std equals `config.initializer_range`.
self._init_weights(new_embeddings)
elif new_num_tokens > old_num_tokens and mean_resizing:
# initialize new embeddings (in particular added tokens). The new embeddings will be initialized
# from a multivariate normal distribution that has old embeddings' mean and covariance.
# as described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
logger.warning_once(
"The new embeddings will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. "
"As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. "
"To disable this, use `mean_resizing=False`"
)
added_num_tokens = new_num_tokens - old_num_tokens
if is_deepspeed_zero3_enabled() and not is_quantized:
import deepspeed
with deepspeed.zero.GatheredParameters([old_embeddings.weight], modifier_rank=None):
self._init_added_embeddings_weights_with_mean(
old_embeddings, new_embeddings, old_num_tokens, added_num_tokens
)
else:
self._init_added_embeddings_weights_with_mean(
old_embeddings, new_embeddings, old_num_tokens, added_num_tokens
)
# Copy token embeddings from the previous weights
# numbers of tokens to copy
n = min(old_num_tokens, new_num_tokens)
if is_deepspeed_zero3_enabled() and not is_quantized:
import deepspeed
params = [old_embeddings.weight, new_embeddings.weight]
with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
else:
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
# Replace weights in old_embeddings and return to maintain the same embedding type.
# This ensures correct functionality when a Custom Embedding class is passed as input.
# The input and output embedding types remain consistent. (c.f. https://github.com/huggingface/transformers/pull/31979)
if is_deepspeed_zero3_enabled() and not is_quantized:
import deepspeed
params = [old_embeddings.weight, new_embeddings.weight]
with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
old_embeddings.weight = new_embeddings.weight
old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]
# If the new number of tokens is smaller than the original `padding_idx`, the `padding_idx`
# will be set to `None` in the resized embeddings.
if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
old_embeddings.padding_idx = None
else:
old_embeddings.weight.data = new_embeddings.weight.data
old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]
if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
old_embeddings.padding_idx = None
return old_embeddings
    def _get_resized_lm_head(
        self,
        old_lm_head: nn.Linear,
        new_num_tokens: Optional[int] = None,
        transposed: bool = False,
        mean_resizing: bool = True,
    ) -> nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_lm_head (`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at
                the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a
                pointer to the input tokens `torch.nn.Linear` module of the model without doing anything.
            transposed (`bool`, *optional*, defaults to `False`):
                Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim, vocab_size`
                else `vocab_size, lm_head_dim`.
            mean_resizing (`bool`):
                Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and
                covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`.

                Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models,
                where the generated tokens' probabilities will not be affected by the added embeddings because initializing the new embeddings with the
                old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings.
                Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is
            `None`
        """
        if new_num_tokens is None:
            return old_lm_head

        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            # Under ZeRO-3 the weight is partitioned across ranks; gather it (read-only,
            # modifier_rank=None) so `.size()` reflects the full, unpartitioned shape.
            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
                old_num_tokens, old_lm_head_dim = (
                    old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
                )
        else:
            old_num_tokens, old_lm_head_dim = (
                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
            )

        # Under ZeRO-3 we still rebuild the layer even when the size is unchanged
        # (hence the extra `not is_deepspeed_zero3_enabled()` condition).
        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You"
                " should either use a different resize function or make sure that `old_lm_head` are an instance of"
                f" {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None

        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
        # because the shape of the new embedding layer is used across various modeling files
        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
        # to errors when training.
        new_lm_head = nn.Linear(
            *new_lm_head_shape,
            bias=has_new_lm_head_bias,
            device=old_lm_head.weight.device,
            dtype=old_lm_head.weight.dtype,
        )

        if new_num_tokens > old_num_tokens and not mean_resizing:
            # initialize new embeddings (in particular added tokens) with a mean of 0 and std equals `config.initializer_range`.
            self._init_weights(new_lm_head)

        elif new_num_tokens > old_num_tokens and mean_resizing:
            # initialize new lm_head weights (in particular added tokens). The new lm_head weights
            # will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance.
            # as described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
            logger.warning_once(
                "The new lm_head weights will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. "
                "As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. "
                "To disable this, use `mean_resizing=False`"
            )

            added_num_tokens = new_num_tokens - old_num_tokens
            if is_deepspeed_zero3_enabled() and not is_quantized:
                import deepspeed

                # Gather old weight (and bias, if present) so the mean/covariance are computed
                # over the full tensors, not a local ZeRO-3 shard.
                params = [old_lm_head.weight]
                if has_new_lm_head_bias:
                    params += [old_lm_head.bias]
                with deepspeed.zero.GatheredParameters(params, modifier_rank=None):
                    self._init_added_lm_head_weights_with_mean(
                        old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed
                    )
                    if has_new_lm_head_bias:
                        self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens)
            else:
                self._init_added_lm_head_weights_with_mean(
                    old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed
                )
                if has_new_lm_head_bias:
                    self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            # modifier_rank=0: only rank 0 writes the copied rows; DeepSpeed re-scatters afterwards.
            # NOTE(review): `old_lm_head.bias`/`new_lm_head.bias` may be None here when the head has
            # no bias — presumably GatheredParameters tolerates that; confirm against DeepSpeed docs.
            params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                self._copy_lm_head_original_to_resized(
                    new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias
                )
        else:
            self._copy_lm_head_original_to_resized(
                new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias
            )

        return new_lm_head
    def _init_added_embeddings_weights_with_mean(
        self, old_embeddings, new_embeddings, old_num_tokens, added_num_tokens
    ):
        """
        Initialize the last `added_num_tokens` rows of `new_embeddings.weight` by sampling from a
        multivariate normal distribution fitted to the old embedding rows (mean + covariance),
        falling back to the plain mean when the covariance is not positive definite.

        Statistics are computed in float32 and the samples are cast back to the old weight's dtype.
        """
        old_embeddings_weight = old_embeddings.weight.data.to(torch.float32)
        mean_embeddings = torch.mean(old_embeddings_weight, axis=0)
        old_centered_embeddings = old_embeddings_weight - mean_embeddings
        covariance = old_centered_embeddings.T @ old_centered_embeddings / old_num_tokens

        # Check if the covariance is positive definite.
        # Note: both the check and the sampling use `epsilon * covariance`, i.e. the fitted
        # covariance scaled down by 1e-9 — the new rows cluster tightly around the mean.
        epsilon = 1e-9
        is_covariance_psd = constraints.positive_definite.check(epsilon * covariance).all()
        if is_covariance_psd:
            # If covariances is positive definite, a distribution can be created. and we can sample new weights from it.
            distribution = torch.distributions.multivariate_normal.MultivariateNormal(
                mean_embeddings, covariance_matrix=epsilon * covariance
            )
            new_embeddings.weight.data[-1 * added_num_tokens :, :] = distribution.sample(
                sample_shape=(added_num_tokens,)
            ).to(old_embeddings.weight.dtype)
        else:
            # Otherwise, just initialize with the mean. because distribution will not be created.
            new_embeddings.weight.data[-1 * added_num_tokens :, :] = (
                mean_embeddings[None, :].repeat(added_num_tokens, 1).to(old_embeddings.weight.dtype)
            )
    def _init_added_lm_head_weights_with_mean(
        self,
        old_lm_head,
        new_lm_head,
        old_lm_head_dim,
        old_num_tokens,
        added_num_tokens,
        transposed: bool = False,
    ):
        """
        Mean/covariance-initialize the added rows of an LM head by reusing
        `_init_added_embeddings_weights_with_mean`, which expects the vocab dimension first.

        When `transposed` is True the weights are flipped in place to `(vocab, dim)` for the
        shared helper and flipped back afterwards. Note this temporarily mutates
        `old_lm_head.weight.data` as well, so the two transposes must stay paired.
        """
        if transposed:
            # Transpose to the desired shape for the function.
            new_lm_head.weight.data = new_lm_head.weight.data.T
            old_lm_head.weight.data = old_lm_head.weight.data.T

        # The same initialization logic as Embeddings.
        self._init_added_embeddings_weights_with_mean(old_lm_head, new_lm_head, old_num_tokens, added_num_tokens)

        if transposed:
            # Transpose again to the correct shape.
            new_lm_head.weight.data = new_lm_head.weight.data.T
            old_lm_head.weight.data = old_lm_head.weight.data.T
    def _init_added_lm_head_bias_with_mean(self, old_lm_head, new_lm_head, added_num_tokens):
        """
        Initialize the last `added_num_tokens` entries of `new_lm_head.bias` by sampling a normal
        distribution around the old bias mean, with the old bias std shrunk by 1e-9 (so the new
        entries are effectively the mean plus tiny noise). Statistics are computed in float32.
        """
        bias_mean = torch.mean(old_lm_head.bias.data, axis=0, dtype=torch.float32)
        bias_std = torch.std(old_lm_head.bias.data, axis=0).to(torch.float32)
        new_lm_head.bias.data[-1 * added_num_tokens :].normal_(mean=bias_mean, std=1e-9 * bias_std)
def _copy_lm_head_original_to_resized(
self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias
):
# Copy old lm head weights to new lm head
if not transposed:
new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
else:
new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]
# Copy bias weights to new lm head
if has_new_lm_head_bias:
new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
def resize_position_embeddings(self, new_num_position_embeddings: int):
raise NotImplementedError(
f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
)
def get_position_embeddings(self) -> Union[nn.Embedding, tuple[nn.Embedding]]:
raise NotImplementedError(
f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
)
    def init_weights(self):
        """
        Maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any
        initialization logic in `_init_weights`.
        """
        # `_init_weights` is a module-level flag, not the method of the same name — presumably
        # toggled off while loading pretrained checkpoints to skip redundant random init;
        # TODO(review): confirm against the flag's definition earlier in this file.
        if _init_weights:
            # Initialize weights
            self.initialize_weights()

            # Tie weights needs to be called here, but it can use the pre-computed `all_tied_weights_keys`
            self.tie_weights(recompute_mapping=False)
    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
        """
        Activates gradient checkpointing for the current model.

        We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of
        the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2

        Args:
            gradient_checkpointing_kwargs (dict, *optional*):
                Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function.

        Raises:
            ValueError: if the model class does not support gradient checkpointing.
        """
        if not self.supports_gradient_checkpointing:
            raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")

        if gradient_checkpointing_kwargs is None:
            gradient_checkpointing_kwargs = {"use_reentrant": True}

        # Pre-bind the checkpoint kwargs so sub-modules can call the function with just (fn, *args).
        gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs)

        # For old GC format (transformers < 4.35.0) for models that live on the Hub
        # we will fall back to the overwritten `_set_gradient_checkpointing` method
        # (detected by the presence of a `value` parameter in its signature).
        _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters

        if not _is_using_old_format:
            self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)
        else:
            self.apply(partial(self._set_gradient_checkpointing, value=True))
            logger.warning(
                "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)."
                "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
            )

        if getattr(self, "_hf_peft_config_loaded", False):
            # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True
            # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334
            # When training with PEFT, only LoRA layers will have requires grad set to True, but the output of frozen layers need to propagate
            # the gradients to make sure the gradient flows.
            self.enable_input_require_grads()
def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint):
is_gradient_checkpointing_set = False
# Apply it on the top-level module in case the top-level modules supports it
# for example, LongT5Stack inherits from `PreTrainedModel`.
if hasattr(self, "gradient_checkpointing"):
self._gradient_checkpointing_func = gradient_checkpointing_func
self.gradient_checkpointing = enable
is_gradient_checkpointing_set = True
for module in self.modules():
if hasattr(module, "gradient_checkpointing"):
module._gradient_checkpointing_func = gradient_checkpointing_func
module.gradient_checkpointing = enable
is_gradient_checkpointing_set = True
if not is_gradient_checkpointing_set:
raise ValueError(
f"{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure all the architecture support it by setting a boolean attribute"
" `gradient_checkpointing` to modules of the model that uses checkpointing."
)
    def gradient_checkpointing_disable(self):
        """
        Deactivates gradient checkpointing for the current model.
        """
        # Unlike `gradient_checkpointing_enable`, an unsupported model is a silent no-op here
        # rather than a ValueError.
        if self.supports_gradient_checkpointing:
            # For old GC format (transformers < 4.35.0) for models that live on the Hub
            # we will fall back to the overwritten `_set_gradient_checkpointing` method
            _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters
            if not _is_using_old_format:
                self._set_gradient_checkpointing(enable=False)
            else:
                logger.warning(
                    "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)."
                    "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
                )
                self.apply(partial(self._set_gradient_checkpointing, value=False))

        if getattr(self, "_hf_peft_config_loaded", False):
            # Undo the `enable_input_require_grads` applied when checkpointing was enabled with PEFT.
            self.disable_input_require_grads()
@property
def is_gradient_checkpointing(self) -> bool:
"""
Whether gradient checkpointing is activated for this model or not.
"""
return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
        push_to_hub: bool = False,
        max_shard_size: Union[int, str] = "5GB",
        safe_serialization: bool = True,
        variant: Optional[str] = None,
        token: Optional[Union[str, bool]] = None,
        save_peft_format: bool = True,
        save_original_format: bool = True,
        **kwargs,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        [`~PreTrainedModel.from_pretrained`] class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful when in distributed training like
                TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
                the main process to avoid race conditions.
            state_dict (nested dictionary of `torch.Tensor`):
                The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only
                save parts of the model or if special precautions need to be taken when recovering the state dictionary
                of a model (like when using model parallelism).
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
                need to replace `torch.save` by another method.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`):
                The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size
                lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
                We default it to 5GB in order for models to be able to run easily on free-tier google colab instances
                without CPU OOM issues.

                <Tip warning={true}>

                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
                which will be bigger than `max_shard_size`.

                </Tip>

            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
            variant (`str`, *optional*):
                If specified, weights are saved in the format pytorch_model.<variant>.bin.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            save_peft_format (`bool`, *optional*, defaults to `True`):
                For backward compatibility with PEFT library, in case adapter weights are attached to the model, all
                keys of the state dict of adapters needs to be prepended with `base_model.model`. Advanced users can
                disable this behaviours by setting `save_peft_format` to `False`.
            save_original_format (`bool`, *optional*, defaults to `True`):
                For backward compatibility with the previous versions of `transfomers` you can save the checkpoint with
                its reverse mapping. The reverse mapping needs to exists even if the model was loaded from a None legacy
                checkpoint.
            kwargs (`dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        if token is not None:
            kwargs["token"] = token

        _hf_peft_config_loaded = getattr(self, "_hf_peft_config_loaded", False)

        # A quantized model can only be saved if its quantizer declares itself serializable
        # (unless only PEFT adapter weights are being saved, which bypasses quantized weights).
        hf_quantizer = getattr(self, "hf_quantizer", None)
        quantization_serializable = (
            hf_quantizer is not None
            and isinstance(hf_quantizer, HfQuantizer)
            and hf_quantizer.is_serializable(safe_serialization=safe_serialization)
        )
        if hf_quantizer is not None and not _hf_peft_config_loaded and not quantization_serializable:
            raise ValueError(
                f"The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from"
                " the logger on the traceback to understand the reason why the quantized model is not serializable."
            )

        if "save_config" in kwargs:
            warnings.warn(
                "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead."
            )
            is_main_process = kwargs.pop("save_config")

        # we need to check against tp_size, not tp_plan, as tp_plan is substituted to the class one
        if self._tp_size is not None and not is_huggingface_hub_greater_or_equal("0.31.4"):
            raise ImportError(
                "Saving a model with tensor parallelism requires `huggingface_hub` version 0.31.4 or higher."
            )

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            create_pr = kwargs.pop("create_pr", False)
            repo_id = create_repo(repo_id, exist_ok=True, **kwargs).repo_id
            # Snapshot file timestamps so only files modified by this save get uploaded later.
            files_timestamps = self._get_files_timestamps(save_directory)

        metadata = {}
        if hf_quantizer is not None:
            state_dict, metadata = hf_quantizer.get_state_dict_and_metadata(self, safe_serialization)
        metadata["format"] = "pt"

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = model_to_save.dtype
        model_to_save.config.dtype = str(dtype).split(".")[1]

        # Attach architecture to the config
        # When using FSDP2, unwrapping is a noop, so the model name doesn't change back to the original model name
        model_to_save.config.architectures = [model_to_save.__class__.__name__.removeprefix("FSDP")]

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)

        # Save the config
        if is_main_process:
            if not _hf_peft_config_loaded:
                model_to_save.config.save_pretrained(save_directory)
            if self.can_generate():
                model_to_save.generation_config.save_pretrained(save_directory)

        if _hf_peft_config_loaded:
            logger.info(
                "Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved."
            )
            state_dict = model_to_save.get_adapter_state_dict(state_dict=state_dict)

            if save_peft_format:
                logger.info(
                    "To match the expected format of the PEFT library, all keys of the state dict of adapters will be prepended with `base_model.model`."
                )
                peft_state_dict = {}
                for key, value in state_dict.items():
                    peft_state_dict[f"base_model.model.{key}"] = value
                state_dict = peft_state_dict

            active_adapter = self.active_adapters()

            if len(active_adapter) > 1:
                raise ValueError(
                    "Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one "
                    "by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`"
                )
            active_adapter = active_adapter[0]

            current_peft_config = self.peft_config[active_adapter]
            current_peft_config.save_pretrained(save_directory)

        # for offloaded modules
        module_map = {}

        # Save the model
        if state_dict is None:
            # if any model parameters are offloaded, make module map
            if (
                hasattr(self, "hf_device_map")
                and len(set(self.hf_device_map.values())) > 1
                and ("cpu" in self.hf_device_map.values() or "disk" in self.hf_device_map.values())
            ):
                warnings.warn(
                    "Attempting to save a model with offloaded modules. Ensure that unallocated cpu memory exceeds the `shard_size` (5GB default)"
                )
                # Map every state-dict key to its owning module so offloaded weights can be
                # onloaded shard-by-shard at write time.
                for name, module in model_to_save.named_modules():
                    if name == "":
                        continue
                    module_state_dict = module.state_dict()

                    for key in module_state_dict:
                        module_map[name + f".{key}"] = module
            state_dict = model_to_save.state_dict()

        # Translate state_dict from smp to hf if saving with smp >= 1.10
        if IS_SAGEMAKER_MP_POST_1_10:
            for smp_to_hf, _ in smp.state.module_manager.translate_functions:
                state_dict = smp_to_hf(state_dict)

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            for ignore_key in self._keys_to_ignore_on_save:
                if ignore_key in state_dict:
                    del state_dict[ignore_key]

        # If model was sharded, we cannot properly determine sizes of tensors that `local_*` strategy was used,
        # therefore we replace them with DTensors that are equivalently sharded
        if self._tp_size is not None:
            state_dict = replace_state_dict_local_with_dtensor(state_dict, self._tp_plan, self._device_mesh)

        if safe_serialization:
            # TODO: fix safe_serialization for tied weights
            # Safetensors does not allow tensor aliasing.
            # We're going to remove aliases before saving
            # Group state-dict names by underlying storage so aliases can be detected.
            ptrs = collections.defaultdict(list)
            for name, tensor in state_dict.items():
                if not isinstance(tensor, torch.Tensor):
                    # Sometimes in the state_dict we have non-tensor objects.
                    # e.g. in bitsandbytes we have some `str` objects in the state_dict
                    # In the non-tensor case, fall back to the pointer of the object itself
                    ptrs[id(tensor)].append(name)
                elif tensor.device.type == "meta":
                    # In offloaded cases, there may be meta tensors in the state_dict.
                    # For these cases, key by the pointer of the original tensor object
                    # (state_dict tensors are detached and therefore no longer shared)
                    tensor = self.get_parameter(name)
                    ptrs[id(tensor)].append(name)
                else:
                    ptrs[id_tensor_storage(tensor)].append(name)

            shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}

            # Recursively descend to find tied weight keys
            _tied_weights_keys = set(_get_tied_weight_keys(self))
            error_names = []
            to_delete_names = set()
            for names in shared_ptrs.values():
                # Removing the keys which are declared as known duplicates on
                # load. This allows to make sure the name which is kept is consistent.
                if _tied_weights_keys is not None:
                    found = 0
                    for name in sorted(names):
                        matches_pattern = any(re.search(pat, name) for pat in _tied_weights_keys)
                        if matches_pattern and name in state_dict:
                            found += 1
                            # Keep one name per shared storage; drop the declared-tied duplicates.
                            if found < len(names):
                                to_delete_names.add(name)
            # We are entering a place where the weights and the transformers configuration do NOT match.
            shared_names, disjoint_names = _find_disjoint(shared_ptrs.values(), state_dict)
            # Those are actually tensor sharing but disjoint from each other, we can safely clone them
            # Reloaded won't have the same property, but it shouldn't matter in any meaningful way.
            for name in disjoint_names:
                state_dict[name] = state_dict[name].clone()

            # When not all duplicates have been cleaned, still remove those keys, but put a clear warning.
            # If the link between tensors was done at runtime then `from_pretrained` will not get
            # the key back leading to random tensor. A proper warning will be shown
            # during reload (if applicable), but since the file is not necessarily compatible with
            # the config, better show a proper warning.
            shared_names, identical_names = _find_identical(shared_names, state_dict)
            # delete tensors that have identical storage
            for inames in identical_names:
                known = inames.intersection(to_delete_names)
                for name in known:
                    del state_dict[name]
                unknown = inames.difference(to_delete_names)
                if len(unknown) > 1:
                    error_names.append(unknown)

            if shared_names:
                error_names.extend(shared_names)

            if len(error_names) > 0:
                raise RuntimeError(
                    f"The weights trying to be saved contained shared tensors {error_names} which are not properly defined. We found `_tied_weights_keys` to be: {_tied_weights_keys}.\n"
                    "This can also just mean that the module's tied weight keys are wrong vs the actual tied weights in the model.",
                )

        # Revert all renaming and/or weight operations
        if save_original_format:
            state_dict = revert_weight_conversion(self, state_dict)

        # Shard the model if it is too big.
        if not _hf_peft_config_loaded:
            weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
            weights_name = _add_variant(weights_name, variant)
        else:
            weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME

        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(
            state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
        )
        # Save index if sharded
        index = None
        if state_dict_split.is_sharded:
            index = {
                "metadata": {"total_parameters": self.num_parameters(), **state_dict_split.metadata},
                "weight_map": state_dict_split.tensor_to_filename,
            }

        # Clean the folder from a previous save
        for filename in os.listdir(save_directory):
            full_filename = os.path.join(save_directory, filename)
            # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
            # in distributed settings to avoid race conditions.
            weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")

            # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
            filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "")
            reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")

            if (
                filename.startswith(weights_no_suffix)
                and os.path.isfile(full_filename)
                and filename not in state_dict_split.filename_to_tensors
                and is_main_process
                and reg.fullmatch(filename_no_suffix) is not None
            ):
                os.remove(full_filename)

        # Save the model
        filename_to_tensors = state_dict_split.filename_to_tensors.items()
        if module_map:
            filename_to_tensors = logging.tqdm(filename_to_tensors, desc="Saving checkpoint shards")
        for shard_file, tensors in filename_to_tensors:
            shard = {}
            for tensor in tensors:
                if _is_dtensor_available and isinstance(state_dict[tensor], DTensor):
                    # Materialize the full (gathered) tensor before writing.
                    full_tensor = state_dict[tensor].full_tensor()
                    # to get the correctly ordered tensor we need to repack if packed
                    if _get_parameter_tp_plan(tensor, self._tp_plan) == "local_packed_rowwise":
                        full_tensor = repack_weights(full_tensor, -1, self._tp_size, 2)
                    shard[tensor] = full_tensor.contiguous()  # only do contiguous after it's permuted correctly
                else:
                    shard[tensor] = state_dict[tensor].contiguous()
                # delete reference, see https://github.com/huggingface/transformers/pull/34890
                del state_dict[tensor]

            # remake shard with onloaded parameters if necessary
            if module_map:
                # init state_dict for this shard
                shard_state_dict = dict.fromkeys(shard, "")
                for module_name in shard:
                    # note that get_state_dict_from_offload can update with meta tensors
                    # if both a parent module and its descendant are offloaded
                    tensor = shard_state_dict[module_name]
                    if tensor == "" or (isinstance(tensor, torch.Tensor) and tensor.device.type == "meta"):
                        # update state dict with onloaded parameters
                        module = module_map[module_name]
                        shard_state_dict = get_state_dict_from_offload(module, module_name, shard_state_dict)

                # assign shard to be the completed state dict
                shard = shard_state_dict
                del shard_state_dict
                gc.collect()

            if safe_serialization:
                # At some point we will need to deal better with save_function (used for TPU and other distributed
                # joyfulness), but for now this enough. # TODO: we should def parallelize this we are otherwise just waiting
                # too much before scheduling the next write when its in a different file
                safe_save_file(shard, os.path.join(save_directory, shard_file), metadata=metadata)
            else:
                save_function(shard, os.path.join(save_directory, shard_file))
        del state_dict

        if index is None:
            path_to_weights = os.path.join(save_directory, weights_name)
            logger.info(f"Model weights saved in {path_to_weights}")
        else:
            save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
            save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant))
            # Save the index as well
            with open(save_index_file, "w", encoding="utf-8") as f:
                content = json.dumps(index, indent=2, sort_keys=True) + "\n"
                f.write(content)
            logger.info(
                f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
                f"split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameters has been saved in the "
                f"index located at {save_index_file}."
            )

        if push_to_hub:
            # Eventually create an empty model card
            model_card = create_and_tag_model_card(repo_id, self.model_tags, token=token)

            # Update model card if needed:
            model_card.save(os.path.join(save_directory, "README.md"))

            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=token,
                create_pr=create_pr,
            )
@wraps(PushToHubMixin.push_to_hub)
def push_to_hub(self, *args, **kwargs):
    """Upload the model to the Hub, merging `self.model_tags` into the `tags` kwarg.

    Any caller-provided `tags` (string or list) are de-duplicated against the
    model's own tags and forwarded to `PushToHubMixin.push_to_hub`.

    Fix: work on a *copy* of `self.model_tags`. The previous code aliased the
    list, so the `append` below permanently mutated the model's tag list as a
    side effect of pushing.
    """
    tags = [] if self.model_tags is None else list(self.model_tags)
    tags_kwargs = kwargs.get("tags", [])
    # Accept a bare string as a single tag.
    if isinstance(tags_kwargs, str):
        tags_kwargs = [tags_kwargs]
    for tag in tags_kwargs:
        if tag not in tags:
            tags.append(tag)
    if tags:
        kwargs["tags"] = tags
    return super().push_to_hub(*args, **kwargs)
def get_memory_footprint(self, return_buffers=True):
    r"""
    Return the memory footprint of the current model, in bytes.

    Useful to benchmark the memory footprint of the current model and design some tests.
    Solution inspired from the PyTorch discussions:
    https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2

    Arguments:
        return_buffers (`bool`, *optional*, defaults to `True`):
            Whether to include buffer tensors in the computation of the memory footprint.
            Buffers are tensors that do not require gradients and are not registered as
            parameters, e.g. mean and std in batch norm layers. Please see:
            https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
    """
    # Gather every tensor that counts toward the footprint, then sum their byte sizes.
    tensors = list(self.parameters())
    if return_buffers:
        tensors.extend(self.buffers())
    return sum(t.nelement() * t.element_size() for t in tensors)
@wraps(torch.nn.Module.cuda)
def cuda(self, *args, **kwargs):
    """Move the model to CUDA, with special handling for quantized models.

    HQQ-quantized models need each `HQQLinear` layer moved explicitly, because
    some of its tensors live in the layer's 'meta' attribute and are not reached
    by the regular `nn.Module.cuda` call. 8-bit bitsandbytes models cannot be
    moved and raise instead.
    """
    quant_method = getattr(self, "quantization_method", None)
    if quant_method == QuantizationMethod.HQQ:
        from hqq.core.quantize import HQQLinear

        # Since HQQLinear stores some tensors in the 'meta' attribute,
        # it's necessary to manually call the `cuda` method on HQQLinear layers.
        super().cuda(*args, **kwargs)
        target_device = args[0] if args else kwargs.get("device", "cuda")
        for layer in self.modules():
            if isinstance(layer, HQQLinear):
                layer.cuda(target_device)
        return self

    # Checks if the model has been loaded in 4-bit or 8-bit with BNB
    if quant_method == QuantizationMethod.BITS_AND_BYTES and getattr(self, "is_loaded_in_8bit", False):
        raise ValueError(
            "Calling `cuda()` is not supported for `8-bit` quantized models. "
            " Please use the model as it is, since the model has already been set to the correct devices."
        )
    return super().cuda(*args, **kwargs)
@wraps(torch.nn.Module.to)
def to(self, *args, **kwargs):
    """Move/cast the model, with guards for quantized models.

    Casting to a new `dtype` is forbidden for bitsandbytes-, GPTQ- and
    Quark-quantized models; the supported API is loading with the desired dtype
    through `from_pretrained`. HQQ models get their `HQQLinear` layers moved
    explicitly, since some of their tensors live in the layer's 'meta' attribute
    and are not reached by `nn.Module.to`.
    """
    # For BNB/GPTQ models, we prevent users from casting the model to another dtype to restrict unwanted behaviours.
    # the correct API should be to load the model with the desired dtype directly through `from_pretrained`.
    dtype_present_in_args = "dtype" in kwargs
    if not dtype_present_in_args:
        # Scan positional args for a dtype; note `arg` deliberately leaks out of
        # this loop and is reused further down when a dtype was found.
        for arg in args:
            if isinstance(arg, torch.dtype):
                dtype_present_in_args = True
                break
    if getattr(self, "quantization_method", None) == QuantizationMethod.HQQ:
        from hqq.core.quantize import HQQLinear

        # Since HQQLinear stores some tensors in the 'meta' attribute, we must
        # explicitly move the parameters to the target device for each HQQLinear layer after `to`.
        super().to(*args, **kwargs)
        for module in self.modules():
            if isinstance(module, HQQLinear):
                if "device" in kwargs:
                    device = kwargs["device"]
                else:
                    # NOTE(review): assumes the first positional arg is a device; a call like
                    # `model.to(torch.float16)` with no device would bind a dtype here — confirm intended.
                    device = args[0]
                if "dtype" in kwargs:
                    dtype = kwargs["dtype"]
                elif dtype_present_in_args:
                    # `arg` is the dtype found by the detection loop above (the loop
                    # breaks on it, so the leaked variable is exactly that dtype).
                    dtype = arg
                else:
                    dtype = None
                # Due to the current messy implementation of HQQLinear, updating `compute_dtype`
                # followed by calling the `cuda` method achieves the intended behavior of `to`,
                # even when the target device is CPU.
                if dtype is not None:
                    module.compute_dtype = dtype
                module.cuda(device)
        return self
    if dtype_present_in_args and getattr(self, "quantization_method", None) == QuantizationMethod.QUARK:
        raise ValueError("Casting a Quark quantized model to a new `dtype` is not supported.")
    # Checks if the model has been loaded in 4-bit or 8-bit with BNB
    if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
        if dtype_present_in_args:
            raise ValueError(
                "You cannot cast a bitsandbytes model in a new `dtype`. Make sure to load the model using `from_pretrained` using the"
                " desired `dtype` by passing the correct `dtype` argument."
            )
        if getattr(self, "is_loaded_in_8bit", False):
            raise ValueError(
                "`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the"
                " model has already been set to the correct devices and casted to the correct `dtype`."
            )
    elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ:
        if dtype_present_in_args:
            raise ValueError(
                "You cannot cast a GPTQ model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired"
                " `dtype` by passing the correct `dtype` argument."
            )
    return super().to(*args, **kwargs)
def half(self, *args):
    """Cast to fp16 via `nn.Module.half`; refuse for quantized models, which are
    already fixed at their correct `dtype`."""
    quantized = getattr(self, "is_quantized", False)
    if not quantized:
        return super().half(*args)
    raise ValueError(
        "`.half()` is not supported for quantized model. Please use the model as it is, since the"
        " model has already been casted to the correct `dtype`."
    )
def float(self, *args):
    """Cast to fp32 via `nn.Module.float`; refuse for quantized models, which are
    already fixed at their correct `dtype`."""
    quantized = getattr(self, "is_quantized", False)
    if not quantized:
        return super().float(*args)
    raise ValueError(
        "`.float()` is not supported for quantized model. Please use the model as it is, since the"
        " model has already been casted to the correct `dtype`."
    )
@classmethod
def get_init_context(cls, is_quantized: bool, _is_ds_init_called: bool):
    """Build the list of context managers under which the model should be instantiated.

    Outside DeepSpeed ZeRO-3, weight init is skipped (`no_init_weights`) and the
    model is created on the meta device (`init_empty_weights`). Under ZeRO-3, the
    partitioned `zero.Init` context is entered instead — unless the model is
    quantized, or ZeRO init was already entered higher up the stack.
    """
    if not is_deepspeed_zero3_enabled():
        return [no_init_weights(), init_empty_weights()]

    import deepspeed

    contexts = [no_init_weights()]
    if not is_quantized and not _is_ds_init_called:
        # We cannot initialize the model on meta device with deepspeed when not quantized
        logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
        contexts.append(deepspeed.zero.Init(config_dict_or_path=deepspeed_config()))
        contexts.append(set_zero3_state())
    elif is_quantized:
        contexts.append(init_empty_weights())
        contexts.append(set_quantized_state())
    return contexts
def set_use_kernels(self, use_kernels, kernel_config):
    """Enable or disable hub-kernel acceleration for this model.

    When enabled, the transformers kernel mapping is registered with the `kernels`
    library; if a `KernelConfig` instance is provided, its validated mapping
    temporarily overrides the default one while `self.use_kernels` is set.

    Raises:
        ValueError: if `use_kernels=True` but the `kernels` package is unavailable.
    """
    if use_kernels:
        if not is_kernels_available():
            raise ValueError(
                "`use_kernels=True` requires kernels>=0.9.0. Please install the latest version with `pip install -U kernels`"
            )
        from kernels import use_kernel_mapping

        from .integrations.hub_kernels import register_kernel_mapping_transformers

        register_kernel_mapping_transformers()
        # `is not None` is technically implied by the isinstance check; kept as-is.
        if kernel_config is not None and isinstance(kernel_config, KernelConfig):
            # This will make sure the mapping is valid, and the layers are registered in the model
            kernel_config.sanitize_kernel_mapping(self)
            # This will create a compatible mapping for the model with the kernels library
            kernel_config.create_compatible_mapping(self)
            # This is a context manager to override the default kernel mapping
            # We are calling kernelize inside this context manager using the use_kernels setter
            # NOTE(review): presumably the `use_kernels` property setter performs the actual
            # kernelization, which is why the assignment must happen inside the context — confirm.
            with use_kernel_mapping(kernel_config.kernel_mapping):
                self.use_kernels = True
        # We use the default kernel mapping in .integrations.hub_kernels
        else:
            self.use_kernels = True
    else:
        self.use_kernels = False
@classmethod
@restore_default_dtype
def from_pretrained(
    cls: type[SpecificPreTrainedModelType],
    pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
    *model_args,
    config: Optional[Union[PreTrainedConfig, str, os.PathLike]] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    ignore_mismatched_sizes: bool = False,
    force_download: bool = False,
    local_files_only: bool = False,
    token: Optional[Union[str, bool]] = None,
    revision: str = "main",
    use_safetensors: Optional[bool] = True,
    weights_only: bool = True,
    **kwargs,
) -> SpecificPreTrainedModelType:
    r"""
    Instantiate a pretrained pytorch model from a pre-trained model configuration.

    The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
    the model, you should first set it back in training mode with `model.train()`.

    The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
    pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
    task.

    The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
    weights are discarded.

    Parameters:
        pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
            Can be either:

            - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
            - A path to a *directory* containing model weights saved using
              [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
            - `None` if you are both providing the configuration and state dictionary (resp. with keyword
              arguments `config` and `state_dict`).
        model_args (sequence of positional arguments, *optional*):
            All remaining positional arguments will be passed to the underlying model's `__init__` method.
        config (`Union[PreTrainedConfig, str, os.PathLike]`, *optional*):
            Can be either:

            - an instance of a class derived from [`PreTrainedConfig`],
            - a string or path valid as input to [`~PreTrainedConfig.from_pretrained`].

            Configuration for the model to use instead of an automatically loaded configuration. Configuration can
            be automatically loaded when:

            - The model is a model provided by the library (loaded with the *model id* string of a pretrained
              model).
            - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
              save directory.
            - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
              configuration JSON file named *config.json* is found in the directory.
        state_dict (`dict[str, torch.Tensor]`, *optional*):
            A state dictionary to use instead of a state dictionary loaded from saved weights file.

            This option can be used if you want to create a model from a pretrained configuration but load your own
            weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
            [`~PreTrainedModel.from_pretrained`] is not a simpler option.
        cache_dir (`Union[str, os.PathLike]`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the
            standard cache should not be used.
        ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
            Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
            as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
            checkpoint with 3 labels).
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force the (re-)download of the model weights and configuration files, overriding the
            cached versions if they exist.
        proxies (`dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
        output_loading_info(`bool`, *optional*, defaults to `False`):
            Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
        local_files_only(`bool`, *optional*, defaults to `False`):
            Whether or not to only look at local files (i.e., do not try to download the model).
        token (`str` or `bool`, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
            the token generated when running `hf auth login` (stored in `~/.huggingface`).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.

            <Tip>

            To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

            </Tip>

        attn_implementation (`str`, *optional*):
            The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)), or `"flash_attention_3"` (using [Dao-AILab/flash-attention/hopper](https://github.com/Dao-AILab/flash-attention/tree/main/hopper)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.

            Accept HF kernel references in the form:
            <namespace>/<repo_name>[@<revision>][:<kernel_name>]
            - <namespace> and <repo_name> are any non-"/" and non-":" sequences.
            - "@<revision>" is optional (branch, tag, or commit-ish), e.g. "@main", "@v1.2.0", "@abc123".
            - ":<kernel_name>" is optional and selects a function inside the kernel repo.
            - Both options can appear together and in this order only: @revision first, then :kernel_name.
            - We intentionally allow a leading "<wrapper>|" prefix (e.g., "flash|...") because the code
              strips it before loading; '|' is not excluded in the character classes here.
            Examples that match:
            "org/model"
            "org/model@main"
            "org/model:custom_kernel"
            "org/model@v1.2.3:custom_kernel"

        > Parameters for big model inference

        dtype (`str` or `torch.dtype`, *optional*):
            Override the default `torch_dtype` and load the model under a specific `dtype`. The different options
            are:

            1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified
               `dtype`, ignoring the model's `config.dtype` if one exists. If not specified
               - the model will get loaded in `torch.float` (fp32).

            2. `"auto"` - A `dtype` or `torch_dtype` entry in the `config.json` file of the model will be
               attempted to be used. If this entry isn't found then next check the `dtype` of the first weight in
               the checkpoint that's of a floating point type and use that as `dtype`. This will load the model
               using the `dtype` it was saved in at the end of the training. It can't be used as an indicator of how
               the model was trained. Since it could be trained in one of half precision dtypes, but saved in fp32.

            3. A string that is a valid `torch.dtype`. E.g. "float32" loads the model in `torch.float32`, "float16" loads in `torch.float16` etc.

            <Tip>

            For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or
            reach out to the authors and ask them to add this information to the model's card and to insert the
            `dtype` or `torch_dtype` entry in `config.json` on the hub.

            </Tip>

        device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
            A map that specifies where each submodule should go. It doesn't need to be refined to each
            parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
            same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
            like `1`) on which the model will be allocated, the device map will map the entire model to this
            device. Passing `device_map = 0` means put the whole model on GPU 0.

            To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
            more information about each option see [designing a device
            map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
        max_memory (`Dict`, *optional*):
            A dictionary device identifier to maximum memory if using `device_map`. Will default to the maximum memory available for each
            GPU and the available CPU RAM if unset.
        tp_plan (`Optional[Union[dict, str]]`, *optional*):
            A torch tensor parallel plan, see [here](https://pytorch.org/tutorials/intermediate/TP_tutorial.html). Use `tp_plan="auto"` to
            use the predefined plan based on the model. If it's a dict, then it should match between module names and desired layout.
            Note that if you use it, you should launch your script accordingly with `torchrun [args] script.py`. This will be much
            faster than using a `device_map`, but has limitations.
        tp_size (`str`, *optional*):
            A torch tensor parallel degree. If not provided would default to world size.
        device_mesh (`torch.distributed.DeviceMesh`, *optional*):
            A torch device mesh. If not provided would default to world size. Used only for tensor parallel for now.
            If provided, it has to contain dimension named `"tp"` in case it's > 1 dimensional, this dimension will be used for tensor parallelism
        offload_folder (`str` or `os.PathLike`, *optional*):
            If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
        offload_buffers (`bool`, *optional*):
            Whether or not to offload the buffers with the model parameters.
        quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*):
            A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g
            bitsandbytes, gptq).
        subfolder (`str`, *optional*, defaults to `""`):
            In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
            specify the folder name here.
        variant (`str`, *optional*):
            If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin.
        use_safetensors (`bool`, *optional*, defaults to `None`):
            Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors`
            is not installed, it will be set to `False`.
        weights_only (`bool`, *optional*, defaults to `True`):
            Indicates whether unpickler should be restricted to loading only tensors, primitive types,
            dictionaries and any types added via torch.serialization.add_safe_globals().
            When set to False, we can load wrapper tensor subclass weights.
        key_mapping (`dict[str, str], *optional*):
            A potential mapping of the weight names if using a model on the Hub which is compatible to a Transformers
            architecture, but was not converted accordingly.
        kwargs (remaining dictionary of keyword arguments, *optional*):
            Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
            `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
            automatically loaded:

            - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
              underlying model's `__init__` method (we assume all relevant updates to the configuration have
              already been done)
            - If a configuration is not provided, `kwargs` will be first passed to the configuration class
              initialization function ([`~PreTrainedConfig.from_pretrained`]). Each key of `kwargs` that
              corresponds to a configuration attribute will be used to override said attribute with the
              supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
              will be passed to the underlying model's `__init__` function.

    <Tip>

    Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
    use this method in a firewalled environment.

    </Tip>

    Examples:

    ```python
    >>> from transformers import BertConfig, BertModel

    >>> # Download model and configuration from huggingface.co and cache.
    >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased")
    >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
    >>> model = BertModel.from_pretrained("./test/saved_model/")
    >>> # Update configuration during loading.
    >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True)
    >>> assert model.config.output_attentions == True
    ```
    """
    # --- Stage 1: pop loader-specific kwargs (everything left flows to config/model init) ---
    state_dict = kwargs.pop("state_dict", None)
    proxies = kwargs.pop("proxies", None)
    output_loading_info = kwargs.pop("output_loading_info", False)
    from_pipeline = kwargs.pop("_from_pipeline", None)
    from_auto_class = kwargs.pop("_from_auto", False)
    dtype = kwargs.pop("dtype", None)
    torch_dtype = kwargs.pop("torch_dtype", None)  # kept for BC
    device_map = kwargs.pop("device_map", None)
    max_memory = kwargs.pop("max_memory", None)
    offload_folder = kwargs.pop("offload_folder", None)
    offload_buffers = kwargs.pop("offload_buffers", False)
    quantization_config = kwargs.pop("quantization_config", None)
    subfolder = kwargs.pop("subfolder", "")
    commit_hash = kwargs.pop("_commit_hash", None)
    variant = kwargs.pop("variant", None)
    adapter_kwargs = (kwargs.pop("adapter_kwargs", {}) or {}).copy()
    adapter_name = kwargs.pop("adapter_name", "default")
    generation_config = kwargs.pop("generation_config", None)
    gguf_file = kwargs.pop("gguf_file", None)
    tp_plan = kwargs.pop("tp_plan", None)
    tp_size = kwargs.pop("tp_size", None)
    distributed_config: DistributedConfig = kwargs.pop("distributed_config", None)
    device_mesh = kwargs.pop("device_mesh", None)
    trust_remote_code = kwargs.pop("trust_remote_code", None)
    use_kernels = kwargs.pop("use_kernels", False)
    kernel_config = kwargs.pop("kernel_config", None)
    key_mapping = kwargs.pop("key_mapping", None)

    # A distributed_config implies tensor parallelism, so default the plan to "auto".
    if distributed_config is not None and tp_plan is None:
        tp_plan = "auto"

    # Not used anymore -- remove them from the kwargs
    for name in ["mirror", "_fast_init", "low_cpu_mem_usage", "from_tf", "from_flax", "offload_state_dict"]:
        _ = kwargs.pop(name, None)

    # For BC on torch_dtype argument
    if torch_dtype is not None:
        dtype = dtype if dtype is not None else torch_dtype

    if is_offline_mode() and not local_files_only:
        local_files_only = True

    # --- Stage 2: validate arguments and resolve download parameters ---
    download_kwargs = {
        "cache_dir": cache_dir,
        "force_download": force_download,
        "proxies": proxies,
        "local_files_only": local_files_only,
        "token": token,
        "revision": revision,
        "subfolder": subfolder,
    }
    download_kwargs_with_commit = {**download_kwargs, "commit_hash": commit_hash}

    if state_dict is not None and (pretrained_model_name_or_path is not None or gguf_file is not None):
        raise ValueError(
            "`state_dict` cannot be passed together with a model name or a `gguf_file`. Use one of the two loading strategies."
        )
    if device_map == "auto" and int(os.environ.get("WORLD_SIZE", "0")):
        logger.info(
            "You've set device_map=`auto` while triggering a distributed run with torchrun. This might lead to unexpected behavior. "
            "If your plan is to load the model on each device, you should set device_map={"
            ": PartialState().process_index} where PartialState comes from accelerate library"
        )
    if tp_plan is not None or tp_size is not None:  # TP warnings, and setup
        device_map, device_mesh, tp_size = initialize_tensor_parallelism(
            tp_plan, tp_size=tp_size, device_mesh=device_mesh, device_map=device_map
        )
    if gguf_file is not None and not is_accelerate_available():
        raise ValueError("accelerate is required when loading a GGUF file `pip install accelerate`.")

    if adapter_kwargs is None:
        adapter_kwargs = {}
    # May redirect `pretrained_model_name_or_path` to a base model if an adapter repo was given.
    _adapter_model_path, pretrained_model_name_or_path, adapter_kwargs = maybe_load_adapters(
        pretrained_model_name_or_path,
        download_kwargs_with_commit,
        **adapter_kwargs,
    )
    device_map = check_and_set_device_map(device_map)  # warn, error and fix the device map

    user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
    if from_pipeline is not None:
        user_agent["using_pipeline"] = from_pipeline

    # --- Stage 3: resolve the config ---
    # Load config if we don't provide a configuration
    if not isinstance(config, PreTrainedConfig):
        config_path = config if config is not None else pretrained_model_name_or_path
        config, model_kwargs = cls.config_class.from_pretrained(
            config_path,
            return_unused_kwargs=True,
            gguf_file=gguf_file,
            _from_auto=from_auto_class,
            _from_pipeline=from_pipeline,
            **download_kwargs,
            **kwargs,
        )
        if "gguf_file" in model_kwargs:
            model_kwargs.pop("gguf_file")
        commit_hash = model_kwargs.pop("_commit_hash", commit_hash)
    else:
        config = copy.deepcopy(config)
        model_kwargs = kwargs
        commit_hash = getattr(config, "_commit_hash", commit_hash)

    download_kwargs_with_commit["commit_hash"] = commit_hash

    # Because some composite configs call super().__init__ before instantiating the sub-configs, we need this call
    # to correctly redispatch recursively if the kwarg is provided
    if "attn_implementation" in kwargs:
        config._attn_implementation = kwargs.pop("attn_implementation")

    # --- Stage 4: quantizer setup and checkpoint resolution ---
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
        config, quantization_config, dtype, device_map, weights_only, user_agent
    )

    if gguf_file:
        if hf_quantizer is not None:
            raise ValueError(
                "You cannot combine Quantization and loading a model from a GGUF file, try again by making sure you did not passed a `quantization_config` or that you did not load a quantized model from the Hub."
            )
        if device_map is not None and (
            (isinstance(device_map, dict) and "disk" in device_map.values()) or "disk" in device_map
        ):
            raise RuntimeError(
                "One or more modules is configured to be mapped to disk. Disk offload is not supported for models "
                "loaded from GGUF files."
            )

    if kernel_config is not None and not use_kernels:
        logger.warning_once(
            "A kernel_config was provided but use_kernels is False; setting use_kernels=True automatically. To suppress this warning, explicitly set use_kernels to True."
        )
        use_kernels = True

    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        variant=variant,
        gguf_file=gguf_file,
        use_safetensors=use_safetensors,
        download_kwargs=download_kwargs_with_commit,
        user_agent=user_agent,
        is_remote_code=cls._auto_class is not None,
        transformers_explicit_filename=getattr(config, "transformers_weights", None),
    )

    is_quantized = hf_quantizer is not None

    if gguf_file:
        from .modeling_gguf_pytorch_utils import load_gguf_checkpoint

        # we need a dummy model to get the state_dict - for this reason, we keep the state_dict as if it was
        # passed directly as a kwarg from now on
        with torch.device("meta"):
            dummy_model = cls(config)
        state_dict = load_gguf_checkpoint(checkpoint_files[0], return_tensors=True, model_to_load=dummy_model)[
            "tensors"
        ]

    # Find the correct dtype based on current state
    config, dtype, dtype_orig = _get_dtype(
        cls, dtype, checkpoint_files, config, sharded_metadata, state_dict, weights_only
    )

    # --- Stage 5: instantiate the (empty) model under the right init contexts ---
    config.name_or_path = pretrained_model_name_or_path
    model_init_context = cls.get_init_context(is_quantized, _is_ds_init_called)
    config = copy.deepcopy(config)  # We do not want to modify the config inplace in from_pretrained.
    with ContextManagers(model_init_context):
        # Let's make sure we don't run the init function of buffer modules
        model = cls(config, *model_args, **model_kwargs)

    # Obtain the weight conversion mapping for this model if any are registered
    weight_conversions = get_model_conversion_mapping(model, key_mapping, hf_quantizer)

    # make sure we use the model's config since the __init__ call might have copied it
    config = model.config

    if hf_quantizer is not None:  # replace module with quantized modules (does not touch weights)
        hf_quantizer.preprocess_model(
            model=model,
            device_map=device_map,
            keep_in_fp32_modules=model._keep_in_fp32_modules,  # TODO prob no longer needed?
            config=config,
            checkpoint_files=checkpoint_files,
            use_kernels=use_kernels,
        )

    if _torch_distributed_available and device_mesh is not None:  # add hooks to nn.Modules: no weights
        model = distribute_model(model, tp_plan, distributed_config, device_mesh, tp_size)

    # Prepare the full device map
    if device_map is not None:
        device_map = _get_device_map(model, device_map, max_memory, hf_quantizer)

    # restore default dtype
    if dtype_orig is not None:
        torch.set_default_dtype(dtype_orig)

    # --- Stage 6: load the weights into the model ---
    # Finalize model weight initialization
    model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs = cls._load_pretrained_model(
        model,
        state_dict,
        checkpoint_files,
        pretrained_model_name_or_path,
        ignore_mismatched_sizes=ignore_mismatched_sizes,
        sharded_metadata=sharded_metadata,
        device_map=device_map,
        disk_offload_folder=offload_folder,
        dtype=dtype,
        hf_quantizer=hf_quantizer,
        device_mesh=device_mesh,
        weights_only=weights_only,
        weight_mapping=weight_conversions,
    )

    # --- Stage 7: post-processing (eval mode, kernels, generation, dispatch, adapters) ---
    model.eval()  # Set model in evaluation mode to deactivate DropOut modules by default
    model.set_use_kernels(use_kernels, kernel_config)

    # If it is a model with generation capabilities, attempt to load generation files (generation config,
    # custom generate function)
    if model.can_generate() and hasattr(model, "adjust_generation_fn"):
        model.adjust_generation_fn(
            generation_config,
            from_auto_class,
            from_pipeline,
            pretrained_model_name_or_path,
            **download_kwargs,
            trust_remote_code=trust_remote_code,
            **kwargs,
        )

    # for device_map="auto" : dispatch model with hooks on all devices if necessary
    if device_map is not None and device_mesh is None:
        accelerate_dispatch(model, hf_quantizer, device_map, offload_folder, offload_index, offload_buffers)

    if hf_quantizer is not None:
        model.hf_quantizer = hf_quantizer
        hf_quantizer.postprocess_model(model, config=config)  # usually a no-op but sometimes needed

    if _adapter_model_path is not None:
        adapter_kwargs["key_mapping"] = weight_conversions  # TODO: Dynamic weight loader for adapters
        model.load_adapter(
            _adapter_model_path,
            adapter_name=adapter_name,
            token=token,
            adapter_kwargs=adapter_kwargs,
        )

    if output_loading_info:
        loading_info = {
            "missing_keys": missing_keys,
            "unexpected_keys": unexpected_keys,
            "mismatched_keys": mismatched_keys,
            "error_msgs": error_msgs,
        }
        return model, loading_info

    return model
@classmethod
def _load_pretrained_model(
cls,
model: "PreTrainedModel",
state_dict: Optional[dict],
checkpoint_files: Optional[list[str]],
pretrained_model_name_or_path: Optional[str],
ignore_mismatched_sizes: bool = False,
sharded_metadata: Optional[dict] = None,
device_map: Optional[dict] = None,
disk_offload_folder: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
hf_quantizer: Optional[HfQuantizer] = None,
device_mesh: Optional["torch.distributed.device_mesh.DeviceMesh"] = None,
weights_only: bool = True,
weight_mapping: Optional[Sequence[WeightConverter | WeightRenaming]] = None,
):
is_quantized = hf_quantizer is not None
is_hqq_or_quark = is_quantized and hf_quantizer.quantization_config.quant_method in {
QuantizationMethod.HQQ,
QuantizationMethod.QUARK,
}
# Model's definition arriving here is final (TP hooks added, quantized layers replaces)
expected_keys = list(model.state_dict().keys())
if logger.level >= logging.WARNING:
verify_tp_plan(expected_keys, getattr(model, "_tp_plan", None))
# This offload index if for params explicitly on the "disk" in the device_map
disk_offload_index = None
# Prepare parameters offloading if needed
if device_map is not None and "disk" in device_map.values():
disk_offload_index = accelerate_disk_offload(
disk_offload_folder,
checkpoint_files,
device_map,
expected_keys,
sharded_metadata,
dtype,
weight_mapping,
)
# Warmup cuda to load the weights much faster on devices
if device_map is not None and not is_hqq_or_quark:
expanded_device_map = expand_device_map(device_map, expected_keys)
caching_allocator_warmup(model, expanded_device_map, hf_quantizer)
tp_plan = getattr(model, "_tp_plan", None)
error_msgs = []
if is_deepspeed_zero3_enabled() and not is_quantized:
if state_dict is None:
merged_state_dict = {}
for ckpt_file in checkpoint_files:
merged_state_dict.update(load_state_dict(ckpt_file, map_location="cpu", weights_only=weights_only))
state_dict = merged_state_dict
error_msgs += _load_state_dict_into_zero3_model(model, state_dict)
# This is not true but for now we assume only best-case scenario with deepspeed, i.e. perfectly matching checkpoints
missing_keys, unexpected_keys, mismatched_keys, misc = set(), set(), set(), set()
else:
all_pointer = set()
# Checkpoints are safetensors
if checkpoint_files is not None and checkpoint_files[0].endswith(".safetensors"):
merged_state_dict = {}
for file in checkpoint_files:
file_pointer = safe_open(file, framework="pt", device="cpu")
all_pointer.add(file_pointer)
for k in file_pointer.keys():
merged_state_dict[k] = file_pointer.get_slice(k) # don't materialize yet
# User passed an explicit state_dict
elif state_dict is not None:
merged_state_dict = state_dict
# Checkpoints are .bin
elif checkpoint_files is not None:
merged_state_dict = {}
for ckpt_file in checkpoint_files:
merged_state_dict.update(load_state_dict(ckpt_file))
else:
raise ValueError("Neither a state dict nor checkpoint files were found.")
missing_keys, unexpected_keys, mismatched_keys, disk_offload_index, misc = (
convert_and_load_state_dict_in_model(
model,
merged_state_dict,
weight_mapping,
tp_plan,
hf_quantizer,
dtype,
device_map,
model.dtype_plan,
device_mesh,
disk_offload_index,
disk_offload_folder,
)
)
# finally close all opened file pointers
for k in all_pointer:
k.__exit__(None, None, None)
# Marks tied weights as `_is_hf_initialized` to avoid initializing them (it's very important for efficiency)
model.mark_tied_weights_as_initialized()
# Move missing (and potentially mismatched) keys back to cpu from meta device (because they won't be moved when
# loading the weights as they are not in the loaded state dict)
miss_and_mismatched = missing_keys | {k[0] for k in mismatched_keys}
model._move_missing_keys_from_meta_to_cpu(miss_and_mismatched, dtype, hf_quantizer)
# Correctly initialize the missing (and potentially mismatched) keys (all parameters without the `_is_hf_initialzed` flag)
model._initialize_missing_keys(is_quantized)
# Tie the weights
model.tie_weights(missing_keys=missing_keys, recompute_mapping=False)
# Adjust missing and unexpected keys
missing_keys, unexpected_keys = model._adjust_missing_and_unexpected_keys(missing_keys, unexpected_keys)
# Post-processing for tensor parallelism
if device_mesh is not None:
# When using TP, the device map is a single device for all parameters
tp_device = list(device_map.values())[0]
# This is needed for the RotaryEmbedding, which was not initialized on the correct device as it is
# not part of the state_dict (persistent=False)
for buffer in model.buffers(): # TODO to avaoid this buffer could be added to the ckpt
if buffer.device != tp_device:
buffer.data = buffer.to(tp_device)
# In this case, the top-most task module weights were not moved to device and parallelized as they
# were not part of the loaded weights: do it now
if missing_keys:
state_dict = model.state_dict()
for name in missing_keys:
param = state_dict[name]
# Shard the param
shard_and_distribute_module(
model,
param.to(tp_device),
param,
name,
None,
False,
device_mesh.get_local_rank(),
device_mesh,
)
log_state_dict_report(
model=model,
pretrained_model_name_or_path=pretrained_model_name_or_path,
logger=logger,
error_msgs=error_msgs,
unexpected_keys=unexpected_keys,
missing_keys=missing_keys,
mismatched_keys=mismatched_keys,
mismatched_shapes=mismatched_keys,
misc=misc,
ignore_mismatched_sizes=ignore_mismatched_sizes,
)
return model, missing_keys, unexpected_keys, mismatched_keys, disk_offload_index, error_msgs
def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
module_keys = {".".join(key.split(".")[:-1]) for key in names}
# torch.nn.ParameterList is a special case where two parameter keywords
# are appended to the module name, *e.g.* bert.special_embeddings.0
module_keys = module_keys.union(
{".".join(key.split(".")[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()}
)
retrieved_modules = []
# retrieve all modules that has at least one missing weight name
for name, module in self.named_modules():
if remove_prefix:
_prefix = f"{self.base_model_prefix}."
name = name.removeprefix(_prefix)
elif add_prefix:
name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix
if name in module_keys:
retrieved_modules.append(module)
return retrieved_modules
@classmethod
def register_for_auto_class(cls, auto_class="AutoModel"):
"""
Register this class with a given auto class. This should only be used for custom models as the ones in the
library are already mapped with an auto class.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`):
The auto class to register this new model with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask):
"""
Shows a one-time warning if the input_ids appear to contain padding and no attention mask was given.
"""
# Skip the check during tracing.
if is_tracing(input_ids):
return
if (attention_mask is not None) or (self.config.pad_token_id is None):
return
# Check only the first and last input IDs to reduce overhead.
if self.config.pad_token_id in input_ids[:, [-1, 0]]:
warn_string = (
"We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See "
"https://huggingface.co/docs/transformers/troubleshooting"
"#incorrect-output-when-padding-tokens-arent-masked."
)
# If the pad token is equal to either BOS, EOS, or SEP, we do not know whether the user should use an
# attention_mask or not. In this case, we should still show a warning because this is a rare case.
if (
(self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id)
or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id)
or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id)
):
warn_string += (
f"\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical "
f"to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), "
f"or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded."
)
logger.warning_once(warn_string)
@property
def supports_tp_plan(self):
"""
Returns whether the model has a tensor parallelism plan.
"""
if self._tp_plan is not None:
return True
# Check if base model has a TP plan
if getattr(self.base_model, "_tp_plan", None) is not None:
return True
if self.config.base_model_tp_plan is not None:
return True
return False
@property
def tp_size(self):
"""
Returns the model's tensor parallelism degree.
"""
# if None, the model didn't undergo tensor parallel sharding
return self._tp_size
@property
def supports_pp_plan(self):
if self._pp_plan is not None:
return True
# Check if base model has PP plan
if getattr(self.base_model, "_pp_plan", None) is not None:
return True
return False
@property
def loss_function(self):
if hasattr(self, "_loss_function"):
return self._loss_function
loss_type = getattr(self, "loss_type", None)
if loss_type is None or loss_type not in LOSS_MAPPING:
logger.warning_once(
f"`loss_type={loss_type}` was set in the config but it is unrecognized. "
f"Using the default loss: `ForCausalLMLoss`."
)
loss_type = "ForCausalLM"
return LOSS_MAPPING[loss_type]
@loss_function.setter
def loss_function(self, value):
self._loss_function = value
def kernelize(self, mode=None):
if not is_kernels_available():
raise ValueError(
"Kernels are not available. To use kernels, please install kernels using `pip install kernels`"
)
from kernels import Device, Mode, kernelize
mode = Mode.INFERENCE if not self.training else Mode.TRAINING if mode is None else mode
kernelize(self, device=Device(type=self.device.type), mode=mode)
self._use_kernels = True
@property
def use_kernels(self) -> bool:
return getattr(self, "_use_kernels", False)
@use_kernels.setter
def use_kernels(self, value: bool) -> None:
# Avoid re-kernelizing if already enabled
if bool(value) and getattr(self, "_use_kernels", False):
return
if value:
self.kernelize()
else:
if getattr(self, "_use_kernels", False):
logger.warning_once(
"Disabling kernels at runtime is a no-op as there is no 'unkernelize' routine; keeping current kernels active."
)
self._use_kernels = False
def get_compiled_call(self, compile_config: Optional[CompileConfig]) -> Callable:
"""Return a `torch.compile`'d version of `self.__call__`. This is useful to dynamically choose between
non-compiled/compiled `forward` during inference, especially to switch between prefill (where we don't
want to use compiled version to avoid recomputing the graph with new shapes) and iterative decoding
(where we want the speed-ups of compiled version with static shapes)."""
# Only reset it if not present or different from previous config
if "llama4" in self.config.model_type: # TODO try to enable for FULL COMPILE HYBRID CACHE SUPPORT
return self.__call__
compile_config = compile_config or CompileConfig()
default_config = getattr(self.generation_config, "compile_config", None) or CompileConfig()
if (
not hasattr(self, "_compiled_call")
or getattr(self, "_last_compile_config", default_config) != compile_config
):
self._last_compile_config = compile_config
self._compiled_call = torch.compile(self.__call__, **compile_config.to_dict())
return self._compiled_call
@classmethod
def is_backend_compatible(cls):
return cls._supports_attention_backend
def _move_missing_keys_from_meta_to_cpu(
self, missing_keys: list[str], dtype: torch.dtype, hf_quantizer: Optional[HfQuantizer]
) -> None:
"""Move the missing keys (keys that are part of the model parameters, but were NOT found in the loaded state dicts) back
from meta device to cpu.
"""
is_quantized = hf_quantizer is not None
# In this case we need to move everything back
if is_fsdp_enabled() and not is_local_dist_rank_0() and not is_quantized:
# We only do it for the parameters, as the buffers are not initialized on the meta device by default
for key, param in self.named_parameters():
value = torch.empty_like(param, dtype=dtype, device="cpu")
_load_parameter_into_model(self, key, value)
return
model_state_dict = self.state_dict()
# The tied weight keys are in the "missing" usually, but they should not be moved (they will be tied anyway)
# This is especially important because if they are moved, they will lose the `_is_hf_initialized` flag, and they
# will be re-initialized for nothing (which can be quite long)
for key in missing_keys - self.all_tied_weights_keys.keys():
param = model_state_dict[key]
# Buffers are not initialized on the meta device, so we still need this check to avoid overwriting them
if param.device == torch.device("meta"):
value = torch.empty_like(param, dtype=dtype, device="cpu")
if not is_quantized or not hf_quantizer.param_needs_quantization(self, key):
_load_parameter_into_model(self, key, value)
def _initialize_missing_keys(self, is_quantized: bool) -> None:
"""
Initialize the missing keys (keys that are part of the model parameters, but were NOT found in the loaded state dicts), according to
`_initialize_weights`. Indeed, since the corresponding weights are missing from the state dict, they will not be replaced and need to
be initialized correctly (i.e. weight initialization distribution).
Params that are not missing have the `is_hf_initialized` flag.
"""
# This will only initialize submodules that are not marked as initialized by the line above.
if is_deepspeed_zero3_enabled() and not is_quantized:
import deepspeed
# keep_vars=True as we need the original tensors, so that the "_is_hf_initialized" is present on them
not_initialized_parameters = list(
{v for v in self.state_dict(keep_vars=True).values() if not getattr(v, "_is_hf_initialized", False)}
)
with deepspeed.zero.GatheredParameters(not_initialized_parameters, modifier_rank=0):
self.initialize_weights()
else:
self.initialize_weights()
def _adjust_missing_and_unexpected_keys(
self, missing_keys: set[str], unexpected_keys: set[str]
) -> tuple[set[str], set[str]]:
"""Adjust the `missing_keys` and `unexpected_keys` based on current model's exception rules, to avoid
raising unneeded warnings/errors.
Also, set the `_is_hf_initialized` on tied weight keys, to avoid initializing them as they are going to
be tied anyway.
"""
# Old checkpoints may have keys for rotary_emb.inv_freq forach layer, however we moved this buffer to the main model
# (so the buffer name has changed). Remove them in such a case. This is another exception that was not added to
# `_keys_to_ignore_on_load_unexpected` as it touches many models -> we add it manually to the existing patterns
has_inv_freq_buffers = any(buffer.endswith("rotary_emb.inv_freq") for buffer, _ in self.named_buffers())
additional_unexpected_patterns = [r"rotary_emb\.inv_freq"] if has_inv_freq_buffers else []
missing_patterns = self._keys_to_ignore_on_load_missing or []
unexpected_patterns = (self._keys_to_ignore_on_load_unexpected or []) + additional_unexpected_patterns
ignore_missing_regex, ignore_unexpected_regex = None, None
if len(missing_patterns) > 0:
ignore_missing_regex = re.compile("|".join(rf"({pattern})" for pattern in missing_patterns))
if len(unexpected_patterns) > 0:
ignore_unexpected_regex = re.compile("|".join(rf"({pattern})" for pattern in unexpected_patterns))
# Clean-up missing keys
if ignore_missing_regex is not None:
missing_keys = {key for key in missing_keys if ignore_missing_regex.search(key) is None}
# Clean-up unexpected keys
if ignore_unexpected_regex is not None:
unexpected_keys = {key for key in unexpected_keys if ignore_unexpected_regex.search(key) is None}
return missing_keys, unexpected_keys
def mark_tied_weights_as_initialized(self):
"""Adds the `_is_hf_initialized` flag on parameters that will be tied, in order to avoid initializing them
later as they will be tied (overwritten) anyway.
This is very important as most embeddings are tied, and they are huge params (vocabularies are often 256k), so
running inits on them is very costly."""
for tied_param in self.all_tied_weights_keys.keys():
param = self.get_parameter(tied_param)
param._is_hf_initialized = True
def get_parameter_or_buffer(self, target: str):
"""
Return the parameter or buffer given by `target` if it exists, otherwise throw an error. This combines
`get_parameter()` and `get_buffer()` in a single handy function. If the target is an `_extra_state` attribute,
it will return the extra state provided by the module. Note that it only work if `target` is a leaf of the model.
"""
try:
return self.get_parameter(target)
except AttributeError:
pass
try:
return self.get_buffer(target)
except AttributeError:
pass
module, param_name = get_module_from_name(self, target)
if (
param_name == "_extra_state"
and getattr(module.__class__, "get_extra_state", torch.nn.Module.get_extra_state)
is not torch.nn.Module.get_extra_state
):
return module.get_extra_state()
raise AttributeError(f"`{target}` is neither a parameter, buffer, nor extra state.")
def train(self, mode: bool = True):
out = super().train(mode)
if self.use_kernels:
self.kernelize()
return out
def eval(self):
return self.train(False)
PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub)
if PreTrainedModel.push_to_hub.__doc__ is not None:
PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format(
object="model", object_class="AutoModel", object_files="model file"
)
def unwrap_model(model: nn.Module, recursive: bool = False) -> nn.Module:
"""
Recursively unwraps a model from potential containers (as used in distributed training).
Args:
model (`torch.nn.Module`): The model to unwrap.
recursive (`bool`, *optional*, defaults to `False`):
Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers
recursively, not just the top-level distributed containers.
"""
# Use accelerate implementation if available (should always be the case when using torch)
# This is for pytorch, as we also have to handle things like dynamo
if is_accelerate_available():
kwargs = {}
if recursive:
kwargs["recursive"] = recursive
return extract_model_from_parallel(model, **kwargs)
else:
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return unwrap_model(model.module)
else:
return model
def is_accelerator_device(device: Union[str, int, torch.device]) -> bool:
"""Check if the device is an accelerator. We need to function, as device_map can be "disk" as well, which is not
a proper `torch.device`.
"""
if device == "disk":
return False
else:
return torch.device(device).type not in ["meta", "cpu"]
def caching_allocator_warmup(model: PreTrainedModel, expanded_device_map: dict, hf_quantizer: Optional[HfQuantizer]):
"""This function warm-ups the caching allocator based on the size of the model tensors that will reside on each
device. It allows to have one large call to Malloc, instead of recursively calling it later when loading
the model, which is actually the loading speed bottleneck.
Calling this function allows to cut the model loading time by a very large margin.
A few facts related to loading speed (taking into account the use of this function):
- When loading a model the first time, it is usually slower than the subsequent times, because the OS is very likely
to cache the different state dicts (if enough resources/RAM are available)
- Trying to force the OS to cache the files in advance (by e.g. accessing a small portion of them) is really hard,
and not a good idea in general as this is low level OS optimizations that depend on resource usage anyway
- As of 18/03/2025, loading a Llama 70B model with TP takes ~1 min without file cache, and ~13s with full file cache.
The baseline, i.e. only loading the tensor shards on device and adjusting dtype (i.e. copying them) is ~5s with full cache.
These numbers are reported for TP on 4 H100 GPUs.
- It is useless to pre-allocate more than the model size in this function (i.e. using an `allocation_factor` > 1) as
cudaMalloc is not a bottleneck at all anymore
- Loading speed bottleneck is now almost only tensor copy (i.e. changing the dtype) and moving the tensors to the devices.
However, we cannot really improve on those aspects obviously, as the data needs to be moved/copied in the end.
"""
factor = 2 if hf_quantizer is None else hf_quantizer.get_accelerator_warm_up_factor()
# Remove disk, cpu and meta devices, and cast to proper torch.device
accelerator_device_map = {
param: torch.device(device) for param, device in expanded_device_map.items() if is_accelerator_device(device)
}
if not accelerator_device_map:
return
tp_plan = getattr(model, "_tp_plan", []) or []
tp_plan_regex = (
re.compile("|".join([re.escape(plan) for plan in tp_plan]))
if _torch_distributed_available and torch.distributed.is_initialized()
else None
)
total_byte_count = defaultdict(lambda: 0)
tied_param_names = model.all_tied_weights_keys.keys()
for param_name, device in accelerator_device_map.items():
# Skip if the parameter has already been accounted for (tied weights)
if param_name in tied_param_names:
continue
# For example in the case of MXFP4 quantization, we need to update the param name to the original param name
# because the checkpoint contains blocks, and scales, but since we are dequantizing, we need to use the original param name
if hf_quantizer is not None:
param_name = hf_quantizer.get_param_name(param_name)
try:
param = model.get_parameter_or_buffer(param_name)
except AttributeError:
# TODO: for now let's skip if we can't find the parameters
if hf_quantizer is not None:
continue
raise AttributeError(f"Parameter {param_name} not found in model")
# The dtype of different parameters may be different with composite models or `keep_in_fp32_modules`
param_byte_count = param.numel() * param.element_size()
if tp_plan_regex is not None:
generic_name = re.sub(r"\.\d+\.", ".*.", param_name)
param_byte_count //= torch.distributed.get_world_size() if tp_plan_regex.search(generic_name) else 1
total_byte_count[device] += param_byte_count
# This will kick off the caching allocator to avoid having to Malloc afterwards
for device, byte_count in total_byte_count.items():
if device.type in ["cuda", "xpu"]:
torch_accelerator_module = getattr(torch, device.type)
index = device.index if device.index is not None else torch_accelerator_module.current_device()
device_memory = torch_accelerator_module.mem_get_info(index)[0]
# Allow up to (max device memory - 1.2 GiB) in resource-constrained hardware configurations. Trying to reserve more
# than that amount might sometimes lead to unnecessary cuda/xpu OOM, if the last parameter to be loaded on the device is large,
# and the remaining reserved memory portion is smaller than the param size -> torch will then try to fully re-allocate all
# the param size, instead of using the remaining reserved part, and allocating only the difference, which can lead
# to OOM. See https://github.com/huggingface/transformers/issues/37436#issuecomment-2808982161 for more details.
# Note that we use an absolute value instead of device proportion here, as a 8GiB device could still allocate too much
# if using e.g. 90% of device size, while a 140GiB device would allocate too little
byte_count = min(byte_count, max(0, int(device_memory - 1.2 * 1024**3)))
# If there is *unused* reserved cuda/xpu memory, we can skip/reduce the allocation.
unused_memory = torch_accelerator_module.memory_reserved(
index
) - torch_accelerator_module.memory_allocated(index)
byte_count = max(0, byte_count - unused_memory)
# Allocate memory
_ = torch.empty(byte_count // factor, dtype=torch.float16, device=device, requires_grad=False)
| PreTrainedModel |
python | facebook__pyre-check | tools/typeshed_patcher/typeshed.py | {
"start": 3882,
"end": 4744
} | class ____(Typeshed):
"""
A typeshed backed up by a directory that lives on the filesystem.
For simplicity, we assume that files in this directory remain unchanged. If
the assumption does not hold, e.g. when files are added/removed/changed after
the creation of a `DirectoryBacked` object, the behaviors of its methods
become undefined.
"""
root: pathlib.Path
files: Set[pathlib.Path]
def __init__(self, root: pathlib.Path) -> None:
self.root = root
self.files = {
path.relative_to(root) for path in root.rglob("*") if path.is_file()
}
def all_files(self) -> Iterable[pathlib.Path]:
return self.files
def get_file_content(self, path: pathlib.Path) -> Optional[str]:
return (self.root / path).read_text() if path in self.files else None
| DirectoryBackedTypeshed |
python | getsentry__sentry | tests/sentry/snuba/test_subscriptions.py | {
"start": 725,
"end": 4491
} | class ____(TestCase):
def test(self) -> None:
query_type = SnubaQuery.Type.ERROR
dataset = Dataset.Events
query = "level:error"
aggregate = "count()"
time_window = timedelta(minutes=10)
resolution = timedelta(minutes=1)
snuba_query = create_snuba_query(
query_type, dataset, query, aggregate, time_window, resolution, None
)
assert snuba_query.type == query_type.value
assert snuba_query.dataset == dataset.value
assert snuba_query.query == query
assert snuba_query.aggregate == aggregate
assert snuba_query.time_window == int(time_window.total_seconds())
assert snuba_query.resolution == int(resolution.total_seconds())
assert snuba_query.environment is None
assert set(snuba_query.event_types) == {SnubaQueryEventType.EventType.ERROR}
def test_environment(self) -> None:
query_type = SnubaQuery.Type.ERROR
dataset = Dataset.Events
query = "level:error"
aggregate = "count()"
time_window = timedelta(minutes=10)
resolution = timedelta(minutes=1)
snuba_query = create_snuba_query(
query_type, dataset, query, aggregate, time_window, resolution, self.environment
)
assert snuba_query.type == query_type.value
assert snuba_query.dataset == dataset.value
assert snuba_query.query == query
assert snuba_query.aggregate == aggregate
assert snuba_query.time_window == int(time_window.total_seconds())
assert snuba_query.resolution == int(resolution.total_seconds())
assert snuba_query.environment == self.environment
assert set(snuba_query.event_types) == {SnubaQueryEventType.EventType.ERROR}
def test_event_types(self) -> None:
query_type = SnubaQuery.Type.ERROR
dataset = Dataset.Events
query = "level:error"
aggregate = "count()"
time_window = timedelta(minutes=10)
resolution = timedelta(minutes=1)
snuba_query = create_snuba_query(
query_type,
dataset,
query,
aggregate,
time_window,
resolution,
None,
[SnubaQueryEventType.EventType.DEFAULT],
)
assert snuba_query.type == query_type.value
assert snuba_query.dataset == dataset.value
assert snuba_query.query == query
assert snuba_query.aggregate == aggregate
assert snuba_query.time_window == int(time_window.total_seconds())
assert snuba_query.resolution == int(resolution.total_seconds())
assert snuba_query.environment is None
assert set(snuba_query.event_types) == {SnubaQueryEventType.EventType.DEFAULT}
def test_event_types_metrics(self) -> None:
query_type = SnubaQuery.Type.CRASH_RATE
dataset = Dataset.Metrics
query = ""
aggregate = "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
time_window = timedelta(minutes=10)
resolution = timedelta(minutes=1)
snuba_query = create_snuba_query(
query_type,
dataset,
query,
aggregate,
time_window,
resolution,
None,
)
assert snuba_query.type == query_type.value
assert snuba_query.dataset == dataset.value
assert snuba_query.query == query
assert snuba_query.aggregate == aggregate
assert snuba_query.time_window == int(time_window.total_seconds())
assert snuba_query.resolution == int(resolution.total_seconds())
assert snuba_query.environment is None
assert snuba_query.event_types == []
| CreateSnubaQueryTest |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_import_encoding.py | {
"start": 156,
"end": 3493
} | class ____(AdminTestMixin, TestCase):
"""Test handling 'confirm import' step using different file encodings
and storage types.
"""
def _is_str_in_response(self, filename, input_format, encoding=None):
super()._assert_string_in_response(
self.book_import_url,
filename,
input_format,
encoding=encoding,
str_in_response="test@example.com",
)
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.TempFolderStorage"
)
def test_import_action_handles_TempFolderStorage_read(self):
self._is_str_in_response("books.csv", "0")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.TempFolderStorage"
)
def test_import_action_handles_TempFolderStorage_read_mac(self):
self._is_str_in_response("books-mac.csv", "0")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.TempFolderStorage"
)
def test_import_action_handles_TempFolderStorage_read_iso_8859_1(self):
self._is_str_in_response("books-ISO-8859-1.csv", "0", "ISO-8859-1")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.TempFolderStorage"
)
def test_import_action_handles_TempFolderStorage_read_binary(self):
self._is_str_in_response("books.xls", "1")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.CacheStorage"
)
def test_import_action_handles_CacheStorage_read(self):
self._is_str_in_response("books.csv", "0")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.CacheStorage"
)
def test_import_action_handles_CacheStorage_read_mac(self):
self._is_str_in_response("books-mac.csv", "0")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.CacheStorage"
)
def test_import_action_handles_CacheStorage_read_iso_8859_1(self):
self._is_str_in_response("books-ISO-8859-1.csv", "0", "ISO-8859-1")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.CacheStorage"
)
def test_import_action_handles_CacheStorage_read_binary(self):
self._is_str_in_response("books.xls", "1")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.MediaStorage"
)
def test_import_action_handles_MediaStorage_read(self):
self._is_str_in_response("books.csv", "0")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.MediaStorage"
)
def test_import_action_handles_MediaStorage_read_mac(self):
self._is_str_in_response("books-mac.csv", "0")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.MediaStorage"
)
def test_import_action_handles_MediaStorage_read_iso_8859_1(self):
self._is_str_in_response("books-ISO-8859-1.csv", "0", "ISO-8859-1")
@override_settings(
IMPORT_EXPORT_TMP_STORAGE_CLASS="import_export.tmp_storages.MediaStorage"
)
def test_import_action_handles_MediaStorage_read_binary(self):
self._is_str_in_response("books.xls", "1")
| ConfirmImportEncodingTest |
python | Pylons__pyramid | src/pyramid/testing.py | {
"start": 18708,
"end": 20096
} | class ____:
"""Registered by
:meth:`pyramid.config.Configurator.testing_add_renderer` as
a dummy renderer factory. The indecision about what to use as a
key (a spec vs. a relative name) is caused by test suites in the
wild believing they can register either. The ``factory`` argument
passed to this constructor is usually the *real* template renderer
factory, found when ``testing_add_renderer`` is called."""
def __init__(self, name, factory):
self.name = name
self.factory = factory # the "real" renderer factory reg'd previously
self.renderers = {}
def add(self, spec, renderer):
self.renderers[spec] = renderer
if ':' in spec:
package, relative = spec.split(':', 1)
self.renderers[relative] = renderer
def __call__(self, info):
spec = info.name
renderer = self.renderers.get(spec)
if renderer is None:
if ':' in spec:
package, relative = spec.split(':', 1)
renderer = self.renderers.get(relative)
if renderer is None:
if self.factory:
renderer = self.factory(info)
else:
raise KeyError(
'No testing renderer registered for %r' % spec
)
return renderer
| DummyRendererFactory |
python | walkccc__LeetCode | solutions/313. Super Ugly Number/313-2.py | {
"start": 208,
"end": 701
} | class ____:
def nthSuperUglyNumber(self, n: int, primes: list[int]) -> int:
minHeap = [] # (value, prime, index)
uglyNums = [1]
for prime in primes:
heapq.heappush(minHeap, (prime * uglyNums[0], prime, 1))
while len(uglyNums) < n:
uglyNums.append(minHeap[0][0])
while minHeap[0][0] == uglyNums[-1]:
_, prime, index = heapq.heappop(minHeap)
heapq.heappush(minHeap, (prime * uglyNums[index], prime, index + 1))
return uglyNums[-1]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/most-frequent-ids.py | {
"start": 91,
"end": 790
} | class ____(object):
def mostFrequentIDs(self, nums, freq):
"""
:type nums: List[int]
:type freq: List[int]
:rtype: List[int]
"""
result = []
cnt = collections.Counter()
max_heap = []
for x, f in itertools.izip(nums, freq):
cnt[x] += f
heapq.heappush(max_heap, (-cnt[x], x))
while max_heap and -max_heap[0][0] != cnt[max_heap[0][1]]:
heapq.heappop(max_heap)
result.append(-max_heap[0][0] if max_heap else 0)
return result
# Time: O(nlogn)
# Space: O(n)
import collections
import itertools
from sortedcontainers import SortedList
# sorted list
| Solution |
python | doocs__leetcode | solution/1000-1099/1064.Fixed Point/Solution.py | {
"start": 0,
"end": 327
} | class ____:
def fixedPoint(self, arr: List[int]) -> int:
left, right = 0, len(arr) - 1
while left < right:
mid = (left + right) >> 1
if arr[mid] >= mid:
right = mid
else:
left = mid + 1
return left if arr[left] == left else -1
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_lat_lon_to_be_land_or_ocean.py | {
"start": 1770,
"end": 7606
} | class ____(ColumnMapExpectation):
"""Expect values in a column to be lat lon pairs that represent a point on land or in an ocean.
Args:
column (str): \
The column name.
land_or_ocean (str): \
Either 'land' or 'ocean'. \
represents whether to check if each point is on land or in an ocean.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"lat_lon_on_land": [
(32.699316, -117.063457),
(33.570321, -116.884380),
(33.598757, -117.721397),
],
"lat_lon_in_ocean": [
(20.699316, -117.063457),
(50.699316, -45.063457),
(-3.699316, 45.063457),
],
},
"tests": [
{
"title": "positive_for_lat_lon_on_land",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "lat_lon_on_land",
"land_or_ocean": "land",
"mostly": 1,
},
"out": {"success": True},
},
{
"title": "negative_for_lat_lon_on_land",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "lat_lon_on_land",
"land_or_ocean": "ocean",
"mostly": 0.2,
},
"out": {"success": False},
},
{
"title": "positive_for_lat_lon_in_ocean",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "lat_lon_in_ocean",
"land_or_ocean": "ocean",
"mostly": 1,
},
"out": {"success": True},
},
{
"title": "negative_for_lat_lon_in_ocean",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "lat_lon_in_ocean",
"land_or_ocean": "land",
"mostly": 0.2,
},
"out": {"success": False},
},
],
},
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.lat_lon_land_or_ocean"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"land_or_ocean",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1,
}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"geospatial",
"hackathon-2022",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@mmi333", # Don't forget to add your github handle here!
],
"requirements": ["global-land-mask"],
}
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: dict = None,
**kwargs,
) -> List[
Union[
dict,
str,
RenderedStringTemplateContent,
RenderedTableContent,
RenderedBulletListContent,
RenderedGraphContent,
Any,
]
]:
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"mostly",
],
)
template_str = "values must be lat lon pairs that represent a point"
if params["land_or_ocean"] == "land":
template_str += " on land"
elif params["land_or_ocean"] == "ocean":
template_str += " in an ocean"
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
if __name__ == "__main__":
ExpectColumnValuesLatLonToBeLandOrOcean().print_diagnostic_checklist()
| ExpectColumnValuesLatLonToBeLandOrOcean |
python | getsentry__sentry | src/sentry/audit_log/events.py | {
"start": 4588,
"end": 4888
} | class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(event_id=21, name="TEAM_EDIT", api_name="team.edit")
def render(self, audit_log_entry: AuditLogEntry) -> str:
slug = audit_log_entry.data["slug"]
return f"edited team {slug}"
| TeamEditAuditLogEvent |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.