language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | redis__redis-py | tests/test_cache.py | {
"start": 1127,
"end": 14271
} | class ____:
@pytest.mark.parametrize(
"r",
[
{
"cache": DefaultCache(CacheConfig(max_size=5)),
"single_connection_client": True,
},
{
"cache": DefaultCache(CacheConfig(max_size=5)),
"single_connection_client": False,
},
{
"cache": DefaultCache(CacheConfig(max_size=5)),
"single_connection_client": False,
"decode_responses": True,
},
],
ids=["single", "pool", "decoded"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_get_from_given_cache(self, r, r2):
cache = r.get_cache()
# add key to redis
r.set("foo", "bar")
# get key from redis and save in local cache
assert r.get("foo") in [b"bar", "bar"]
# get key from local cache
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"bar",
"bar",
]
# change key in redis (cause invalidation)
r2.set("foo", "barbar")
# Retrieves a new value from server and cache it
assert r.get("foo") in [b"barbar", "barbar"]
# Make sure that new value was cached
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"barbar",
"barbar",
]
@pytest.mark.parametrize(
"r",
[
{
"cache": DefaultCache(CacheConfig(max_size=5)),
"single_connection_client": True,
},
{
"cache": DefaultCache(CacheConfig(max_size=5)),
"single_connection_client": False,
},
{
"cache": DefaultCache(CacheConfig(max_size=5)),
"single_connection_client": False,
"decode_responses": True,
},
],
ids=["single", "pool", "decoded"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_hash_get_from_given_cache(self, r, r2):
cache = r.get_cache()
hash_key = "hash_foo_key"
field_1 = "bar"
field_2 = "bar2"
# add hash key to redis
r.hset(hash_key, field_1, "baz")
r.hset(hash_key, field_2, "baz2")
# get keys from redis and save them in local cache
assert r.hget(hash_key, field_1) in [b"baz", "baz"]
assert r.hget(hash_key, field_2) in [b"baz2", "baz2"]
# get key from local cache
assert cache.get(
CacheKey(
command="HGET",
redis_keys=(hash_key,),
redis_args=("HGET", hash_key, field_1),
)
).cache_value in [
b"baz",
"baz",
]
assert cache.get(
CacheKey(
command="HGET",
redis_keys=(hash_key,),
redis_args=("HGET", hash_key, field_2),
)
).cache_value in [
b"baz2",
"baz2",
]
# change key in redis (cause invalidation)
r2.hset(hash_key, field_1, "barbar")
# Retrieves a new value from server and cache it
assert r.hget(hash_key, field_1) in [b"barbar", "barbar"]
# Make sure that new value was cached
assert cache.get(
CacheKey(
command="HGET",
redis_keys=(hash_key,),
redis_args=("HGET", hash_key, field_1),
)
).cache_value in [
b"barbar",
"barbar",
]
# The other field is also reset, because the invalidation message contains only the hash key.
assert (
cache.get(
CacheKey(
command="HGET",
redis_keys=(hash_key,),
redis_args=("HGET", hash_key, field_2),
)
)
is None
)
assert r.hget(hash_key, field_2) in [b"baz2", "baz2"]
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": True,
},
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": False,
},
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": False,
"decode_responses": True,
},
],
ids=["single", "pool", "decoded"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_get_from_default_cache(self, r, r2):
cache = r.get_cache()
assert isinstance(cache.eviction_policy, LRUPolicy)
assert cache.config.get_max_size() == 128
# add key to redis
r.set("foo", "bar")
# get key from redis and save in local cache
assert r.get("foo") in [b"bar", "bar"]
# get key from local cache
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"bar",
"bar",
]
# change key in redis (cause invalidation)
r2.set("foo", "barbar")
# Add a small delay to allow invalidation to be processed
time.sleep(0.1)
# Retrieves a new value from server and cache it
assert r.get("foo") in [b"barbar", "barbar"]
# Make sure that new value was cached
assert cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value in [
b"barbar",
"barbar",
]
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": True,
},
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": False,
},
],
ids=["single", "pool"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_cache_clears_on_disconnect(self, r, cache):
cache = r.get_cache()
# add key to redis
r.set("foo", "bar")
# get key from redis and save in local cache
assert r.get("foo") == b"bar"
# get key from local cache
assert (
cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value
== b"bar"
)
# Force disconnection
r.connection_pool.get_connection().disconnect()
# Make sure cache is empty
assert cache.size == 0
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=3),
"single_connection_client": True,
},
{
"cache_config": CacheConfig(max_size=3),
"single_connection_client": False,
},
],
ids=["single", "pool"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_cache_lru_eviction(self, r, cache):
cache = r.get_cache()
# add 3 keys to redis
r.set("foo", "bar")
r.set("foo2", "bar2")
r.set("foo3", "bar3")
# get 3 keys from redis and save in local cache
assert r.get("foo") == b"bar"
assert r.get("foo2") == b"bar2"
assert r.get("foo3") == b"bar3"
# get the 3 keys from local cache
assert (
cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value
== b"bar"
)
assert (
cache.get(
CacheKey(
command="GET", redis_keys=("foo2",), redis_args=("GET", "foo2")
)
).cache_value
== b"bar2"
)
assert (
cache.get(
CacheKey(
command="GET", redis_keys=("foo3",), redis_args=("GET", "foo3")
)
).cache_value
== b"bar3"
)
# add 1 more key to redis (exceed the max size)
r.set("foo4", "bar4")
assert r.get("foo4") == b"bar4"
# the first key is not in the local cache anymore
assert (
cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
)
is None
)
assert cache.size == 3
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": True,
},
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": False,
},
],
ids=["single", "pool"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_cache_ignore_not_allowed_command(self, r):
cache = r.get_cache()
# add fields to hash
assert r.hset("foo", "bar", "baz")
# get random field
assert r.hrandfield("foo") == b"bar"
assert (
cache.get(
CacheKey(
command="HRANDFIELD",
redis_keys=("foo",),
redis_args=("HRANDFIELD", "foo"),
)
)
is None
)
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": True,
},
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": False,
},
],
ids=["single", "pool"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_cache_invalidate_all_related_responses(self, r):
cache = r.get_cache()
# Add keys
assert r.set("foo", "bar")
assert r.set("bar", "foo")
res = r.mget("foo", "bar")
# Make sure that replies was cached
assert res == [b"bar", b"foo"]
assert (
cache.get(
CacheKey(
command="MGET",
redis_keys=("foo", "bar"),
redis_args=("MGET", "foo", "bar"),
)
).cache_value
== res
)
# Make sure that objects are immutable.
another_res = r.mget("foo", "bar")
res.append(b"baz")
assert another_res != res
# Invalidate one of the keys and make sure that
# all associated cached entries was removed
assert r.set("foo", "baz")
assert r.get("foo") == b"baz"
assert (
cache.get(
CacheKey(
command="MGET",
redis_keys=("foo", "bar"),
redis_args=("MGET", "foo", "bar"),
)
)
is None
)
assert (
cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value
== b"baz"
)
@pytest.mark.parametrize(
"r",
[
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": True,
},
{
"cache_config": CacheConfig(max_size=128),
"single_connection_client": False,
},
],
ids=["single", "pool"],
indirect=True,
)
@pytest.mark.onlynoncluster
def test_cache_flushed_on_server_flush(self, r):
cache = r.get_cache()
# Add keys
assert r.set("foo", "bar")
assert r.set("bar", "foo")
assert r.set("baz", "bar")
# Make sure that replies was cached
assert r.get("foo") == b"bar"
assert r.get("bar") == b"foo"
assert r.get("baz") == b"bar"
assert (
cache.get(
CacheKey(command="GET", redis_keys=("foo",), redis_args=("GET", "foo"))
).cache_value
== b"bar"
)
assert (
cache.get(
CacheKey(command="GET", redis_keys=("bar",), redis_args=("GET", "bar"))
).cache_value
== b"foo"
)
assert (
cache.get(
CacheKey(command="GET", redis_keys=("baz",), redis_args=("GET", "baz"))
).cache_value
== b"bar"
)
# Flush server and trying to access cached entry
assert r.flushall()
assert r.get("foo") is None
assert cache.size == 0
@pytest.mark.onlycluster
@skip_if_resp_version(2)
@skip_if_server_version_lt("7.4.0")
| TestCache |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 35169,
"end": 36089
} | class ____(ASTExpression):
def __init__(self, typ: ASTType) -> None:
self.typ = typ
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTSizeofType):
return NotImplemented
return self.typ == other.typ
def __hash__(self) -> int:
return hash(self.typ)
def _stringify(self, transform: StringifyTransform) -> str:
return 'sizeof(' + transform(self.typ) + ')'
def get_id(self, version: int) -> str:
return 'st' + self.typ.get_id(version)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
| ASTSizeofType |
python | getsentry__sentry | tests/sentry/replays/usecases/test_summarize.py | {
"start": 34264,
"end": 55630
} | class ____(
TransactionTestCase,
SnubaTestCase,
):
def setUp(self) -> None:
super().setUp()
self.replay_id = uuid.uuid4().hex
def store_replay(self, dt: datetime | None = None, **kwargs: Any) -> None:
replay = mock_replay(
dt or datetime.now(UTC) - timedelta(minutes=1),
self.project.id,
self.replay_id,
**kwargs,
)
response = requests.post(
settings.SENTRY_SNUBA + "/tests/entities/replays/insert", json=[replay]
)
assert response.status_code == 200
def save_recording_segment(
self, segment_id: int, data: bytes, compressed: bool = True, is_archived: bool = False
) -> None:
metadata = RecordingSegmentStorageMeta(
project_id=self.project.id,
replay_id=self.replay_id,
segment_id=segment_id,
retention_days=30,
file_id=None,
)
FilestoreBlob().set(metadata, zlib.compress(data) if compressed else data)
def test_rpc_simple(self) -> None:
now = datetime.now(UTC)
replay_start = now - timedelta(minutes=1)
data = [
{
"type": 5,
"timestamp": replay_start.timestamp() * 1000,
"data": {
"tag": "breadcrumb",
"payload": {"category": "console", "message": "hello"},
},
},
{
"type": 5,
"timestamp": replay_start.timestamp() * 1000,
"data": {
"tag": "breadcrumb",
"payload": {"category": "console", "message": "world"},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
self.save_recording_segment(1, json.dumps([]).encode())
self.store_replay(dt=replay_start)
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
2,
)
timestamp_ms = replay_start.timestamp() * 1000
assert response == {
"logs": [f"Logged: 'hello' at {timestamp_ms}", f"Logged: 'world' at {timestamp_ms}"]
}
def test_rpc_with_both_direct_and_trace_connected_errors(self) -> None:
"""Test handling of breadcrumbs with both direct and trace connected errors. Error logs should not be duplicated."""
now = datetime.now(UTC)
trace_id = uuid.uuid4().hex
span_id = "1" + uuid.uuid4().hex[:15]
# Create a direct error event that is not trace connected.
direct_event_id = uuid.uuid4().hex
direct_error_timestamp = (now - timedelta(minutes=5)).timestamp()
self.store_event(
data={
"event_id": direct_event_id,
"timestamp": direct_error_timestamp,
"exception": {
"values": [
{
"type": "ZeroDivisionError",
"value": "division by zero",
}
]
},
"contexts": {
"replay": {"replay_id": self.replay_id},
"trace": {
"type": "trace",
"trace_id": uuid.uuid4().hex,
"span_id": span_id,
},
},
},
project_id=self.project.id,
)
# Create a trace connected error event
connected_event_id = uuid.uuid4().hex
connected_error_timestamp = (now - timedelta(minutes=3)).timestamp()
project_2 = self.create_project()
self.store_event(
data={
"event_id": connected_event_id,
"timestamp": connected_error_timestamp,
"exception": {
"values": [
{
"type": "ConnectionError",
"value": "Failed to connect to database",
}
]
},
"contexts": {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": span_id,
}
},
},
project_id=project_2.id,
)
# Store the replay with both error IDs and trace IDs in the time range
self.store_replay(dt=now - timedelta(minutes=10), segment_id=0, trace_ids=[trace_id])
self.store_replay(
dt=now - timedelta(minutes=1),
segment_id=1,
error_ids=[direct_event_id],
trace_ids=[trace_id],
)
data = [
{
"type": 5,
"timestamp": (now - timedelta(minutes=1)).timestamp() * 1000,
"data": {
"tag": "breadcrumb",
"payload": {"category": "console", "message": "hello"},
},
}
]
self.save_recording_segment(0, json.dumps(data).encode())
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
assert len(logs) == 3
assert any("ZeroDivisionError" in log for log in logs)
assert any("division by zero" in log for log in logs)
assert any("ConnectionError" in log for log in logs)
assert any("Failed to connect to database" in log for log in logs)
def test_rpc_with_feedback_breadcrumb(self) -> None:
"""Test handling of a feedback breadcrumb when the feedback
is in nodestore, but hasn't reached Snuba yet.
If the feedback is in Snuba (guaranteed for SDK v8.0.0+),
it should be de-duped like in the duplicate_feedback test below."""
dt = datetime.now(UTC) - timedelta(minutes=3)
feedback_event_id = uuid.uuid4().hex
self.store_event(
data={
"type": "feedback",
"event_id": feedback_event_id,
"timestamp": dt.timestamp(),
"contexts": {
"feedback": {
"contact_email": "josh.ferge@sentry.io",
"name": "Josh Ferge",
"message": "Great website!",
"replay_id": self.replay_id,
"url": "https://sentry.sentry.io/feedback/?statsPeriod=14d",
},
},
},
project_id=self.project.id,
)
self.store_replay(dt=dt)
data = [
{
"type": 5,
"timestamp": dt.timestamp() * 1000,
"data": {
"tag": "breadcrumb",
"payload": {
"category": "sentry.feedback",
"data": {"feedbackId": feedback_event_id},
},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
assert len(logs) == 1
assert "User submitted feedback: 'Great website!'" in logs[0]
def test_rpc_with_trace_errors_both_datasets(self) -> None:
"""Test that trace connected error snuba query works correctly with both datasets."""
now = datetime.now(UTC)
project_1 = self.create_project()
project_2 = self.create_project()
# Create regular error event - errors dataset
event_id_1 = uuid.uuid4().hex
trace_id_1 = uuid.uuid4().hex
dt_1 = now - timedelta(minutes=5)
self.store_event(
data={
"event_id": event_id_1,
"timestamp": dt_1.timestamp(),
"exception": {
"values": [
{
"type": "ValueError",
"value": "Invalid input",
}
]
},
"contexts": {
"trace": {
"type": "trace",
"trace_id": trace_id_1,
"span_id": "1" + uuid.uuid4().hex[:15],
}
},
},
project_id=project_1.id,
)
# Create feedback event - issuePlatform dataset
event_id_2 = uuid.uuid4().hex
trace_id_2 = uuid.uuid4().hex
dt_2 = now - timedelta(minutes=2)
feedback_data = {
"type": "feedback",
"event_id": event_id_2,
"timestamp": dt_2.timestamp(),
"contexts": {
"feedback": {
"contact_email": "test@example.com",
"name": "Test User",
"message": "Great website",
"replay_id": self.replay_id,
"url": "https://example.com",
},
"trace": {
"type": "trace",
"trace_id": trace_id_2,
"span_id": "2" + uuid.uuid4().hex[:15],
},
},
}
create_feedback_issue(
feedback_data, project_2, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE
)
# Store the replay with all trace IDs
self.store_replay(dt=dt_1, segment_id=0, trace_ids=[trace_id_1])
self.store_replay(dt=dt_2, segment_id=1, trace_ids=[trace_id_2])
data = [
{
"type": 5,
"timestamp": dt_1.timestamp() * 1000 + 3000,
"data": {
"tag": "breadcrumb",
"payload": {"category": "console", "message": "hello"},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
assert len(logs) == 3
# Verify that regular error event is included
assert "ValueError" in logs[0]
assert "Invalid input" in logs[0]
assert "User experienced an error" in logs[0]
assert "hello" in logs[1]
# Verify that feedback event is included
assert "Great website" in logs[2]
assert "User submitted feedback" in logs[2]
@patch("sentry.replays.usecases.summarize.fetch_feedback_details")
def test_rpc_with_trace_errors_duplicate_feedback(
self, mock_fetch_feedback_details: MagicMock
) -> None:
"""Test that duplicate feedback events are filtered.
Duplicates may happen when the replay has a feedback breadcrumb,
and the feedback is also returned from the Snuba query for trace-connected errors."""
now = datetime.now(UTC)
feedback_event_id = uuid.uuid4().hex
feedback_event_id_2 = uuid.uuid4().hex
trace_id = uuid.uuid4().hex
trace_id_2 = uuid.uuid4().hex
# Create feedback event - issuePlatform dataset
feedback_data: dict[str, Any] = {
"type": "feedback",
"event_id": feedback_event_id,
"timestamp": (now - timedelta(minutes=3)).timestamp(),
"contexts": {
"feedback": {
"contact_email": "test@example.com",
"name": "Test User",
"message": "Great website",
"replay_id": self.replay_id,
"url": "https://example.com",
},
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": "1" + uuid.uuid4().hex[:15],
},
},
}
# Create another feedback event - issuePlatform dataset
feedback_data_2: dict[str, Any] = {
"type": "feedback",
"event_id": feedback_event_id_2,
"timestamp": (now - timedelta(minutes=2)).timestamp(),
"contexts": {
"feedback": {
"contact_email": "test2@example.com",
"name": "Test User 2",
"message": "Broken website",
"replay_id": self.replay_id,
"url": "https://example.com",
},
"trace": {
"type": "trace",
"trace_id": trace_id_2,
"span_id": "1" + uuid.uuid4().hex[:15],
},
},
}
create_feedback_issue(
feedback_data, self.project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE
)
create_feedback_issue(
feedback_data_2, self.project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE
)
self.store_replay(dt=now - timedelta(minutes=10), segment_id=0, trace_ids=[trace_id])
self.store_replay(dt=now - timedelta(minutes=1), segment_id=1, trace_ids=[trace_id_2])
# mock SDK feedback event with same event_id as the first feedback event
data = [
{
"type": 5,
"timestamp": float((now - timedelta(minutes=3)).timestamp()),
"data": {
"tag": "breadcrumb",
"payload": {
"category": "sentry.feedback",
"data": {"feedbackId": feedback_event_id},
},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
# Mock fetch_feedback_details to return a dup of the first feedback event.
# In prod this is from nodestore. We had difficulties writing to nodestore in tests.
mock_fetch_feedback_details.return_value = EventDict(
id=feedback_event_id,
title="User Feedback",
message=feedback_data["contexts"]["feedback"]["message"],
timestamp=float(feedback_data["timestamp"]),
category="feedback",
)
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
# Verify that only the unique feedback logs are included
assert len(logs) == 2
assert "User submitted feedback" in logs[0]
assert "Great website" in logs[0]
assert "User submitted feedback" in logs[1]
assert "Broken website" in logs[1]
def test_rpc_mobile_replay_navigation(self) -> None:
"""Test that mobile replays are correctly detected and navigation events return log messages."""
now = datetime.now(UTC)
# Store a mobile replay with android platform
self.store_replay(dt=now, platform="android")
# Create segment data with a navigation event
data = [
{
"type": 5,
"timestamp": float(now.timestamp() * 1000),
"data": {
"tag": "breadcrumb",
"payload": {
"timestamp": float(now.timestamp()),
"type": "default",
"category": "navigation",
"data": {
"from": "/home",
"to": "/profile",
},
},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
assert len(logs) == 1
assert "User navigated to: /profile" in logs[0]
assert str(float(now.timestamp() * 1000)) in logs[0]
def test_rpc_web_replay_navigation(self) -> None:
"""Test that web replays do not return navigation log messages."""
now = datetime.now(UTC)
# Store a web replay with javascript platform (default)
self.store_replay(dt=now, platform="javascript")
# Create segment data with a navigation event
data = [
{
"type": 5,
"timestamp": float(now.timestamp() * 1000),
"data": {
"tag": "breadcrumb",
"payload": {
"timestamp": float(now.timestamp()),
"type": "default",
"category": "navigation",
"data": {
"from": "https://example.com/home",
"to": "https://example.com/profile",
},
},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
# Web replays should not include navigation events, so logs should be empty
assert len(logs) == 0
def test_rpc_filters_out_events_before_replay_start(self) -> None:
"""Test that both segment events and error events before replay start are filtered out."""
now = datetime.now(UTC)
replay_start = now - timedelta(minutes=1)
trace_id = uuid.uuid4().hex
span_id = "1" + uuid.uuid4().hex[:15]
# Create an error that occurred BEFORE replay start (should be filtered)
early_error_id = uuid.uuid4().hex
early_error_timestamp = (replay_start - timedelta(minutes=3)).timestamp()
self.store_event(
data={
"event_id": early_error_id,
"timestamp": early_error_timestamp,
"exception": {
"values": [
{
"type": "EarlyError",
"value": "This happened before replay started",
}
]
},
"contexts": {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": span_id,
}
},
},
project_id=self.project.id,
)
# Create an error that occurred AFTER replay start (should be included)
late_error_id = uuid.uuid4().hex
late_error_timestamp = (replay_start + timedelta(minutes=2)).timestamp()
self.store_event(
data={
"event_id": late_error_id,
"timestamp": late_error_timestamp,
"exception": {
"values": [
{
"type": "LateError",
"value": "This happened after replay started",
}
]
},
"contexts": {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": span_id,
}
},
},
project_id=self.project.id,
)
self.store_replay(dt=replay_start, segment_id=0, error_ids=[early_error_id, late_error_id])
data = [
{
"type": 5,
"timestamp": float((replay_start - timedelta(minutes=2)).timestamp() * 1000),
"data": {
"tag": "breadcrumb",
"payload": {
"category": "console",
"message": "hello",
},
},
},
{
"type": 5,
"timestamp": float((replay_start + timedelta(minutes=3)).timestamp() * 1000),
"data": {
"tag": "breadcrumb",
"payload": {
"category": "console",
"message": "world",
},
},
},
]
self.save_recording_segment(0, json.dumps(data).encode())
response = rpc_get_replay_summary_logs(
self.project.id,
self.replay_id,
1,
)
logs = response["logs"]
assert len(logs) == 2
# Should include the late error and the "world" console message
assert "LateError" in logs[0]
assert "This happened after replay started" in logs[0]
assert "world" in logs[1]
# Should NOT include the early error or "hello" console message
assert not any("EarlyError" in log for log in logs)
assert not any("This happened before replay started" in log for log in logs)
assert not any("hello" in log for log in logs)
| RpcGetReplaySummaryLogsTestCase |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 73902,
"end": 75329
} | class ____(fixtures.MappedTest):
"""test [ticket:3167].
See also RefreshFlushInReturningTest in test/orm/test_events.py which
tests the positive case for the refresh_flush event, added in
[ticket:3427].
"""
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("prefetch_val", Integer, default=5),
Column("returning_val", Integer, server_default="5"),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
cls.mapper_registry.map_imperatively(
Thing, cls.tables.test, eager_defaults=True
)
def test_no_attr_events_flush(self):
Thing = self.classes.Thing
mock = Mock()
event.listen(Thing.id, "set", mock.id)
event.listen(Thing.prefetch_val, "set", mock.prefetch_val)
event.listen(Thing.returning_val, "set", mock.prefetch_val)
t1 = Thing()
s = fixture_session()
s.add(t1)
s.flush()
eq_(len(mock.mock_calls), 0)
eq_(t1.id, 1)
eq_(t1.prefetch_val, 5)
eq_(t1.returning_val, 5)
| NoAttrEventInFlushTest |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/python.py | {
"start": 10449,
"end": 14888
} | class ____(PythonOperator, SkipMixin):
"""
Allows a pipeline to continue based on the result of a ``python_callable``.
The ShortCircuitOperator is derived from the PythonOperator and evaluates the result of a
``python_callable``. If the returned result is False or a falsy value, the pipeline will be
short-circuited. Downstream tasks will be marked with a state of "skipped" based on the short-circuiting
mode configured. If the returned result is True or a truthy value, downstream tasks proceed as normal and
an ``XCom`` of the returned result is pushed.
The short-circuiting can be configured to either respect or ignore the ``trigger_rule`` set for
downstream tasks. If ``ignore_downstream_trigger_rules`` is set to True, the default setting, all
downstream tasks are skipped without considering the ``trigger_rule`` defined for tasks. However, if this
parameter is set to False, the direct downstream tasks are skipped but the specified ``trigger_rule`` for
other subsequent downstream tasks are respected. In this mode, the operator assumes the direct downstream
tasks were purposely meant to be skipped but perhaps not other subsequent tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ShortCircuitOperator`
:param ignore_downstream_trigger_rules: If set to True, all downstream tasks from this operator task will
be skipped. This is the default behavior. If set to False, the direct, downstream task(s) will be
skipped but the ``trigger_rule`` defined for all other downstream tasks will be respected.
"""
inherits_from_skipmixin = True
def __init__(self, *, ignore_downstream_trigger_rules: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.ignore_downstream_trigger_rules = ignore_downstream_trigger_rules
def execute(self, context: Context) -> Any:
condition = super().execute(context)
self.log.info("Condition result is %s", condition)
if condition:
self.log.info("Proceeding with downstream tasks...")
return condition
if not self.downstream_task_ids:
self.log.info("No downstream tasks; nothing to do.")
return condition
dag_run = context["dag_run"]
def get_tasks_to_skip():
if self.ignore_downstream_trigger_rules is True:
tasks = context["task"].get_flat_relatives(upstream=False)
else:
tasks = context["task"].get_direct_relatives(upstream=False)
for t in tasks:
if not t.is_teardown:
yield t
to_skip = get_tasks_to_skip()
# this lets us avoid an intermediate list unless debug logging
if self.log.getEffectiveLevel() <= logging.DEBUG:
self.log.debug("Downstream task IDs %s", to_skip := list(get_tasks_to_skip()))
self.log.info("Skipping downstream tasks")
if AIRFLOW_V_3_0_PLUS:
self.skip(
ti=context["ti"],
tasks=to_skip,
)
else:
if to_skip:
self.skip(
dag_run=context["dag_run"],
tasks=to_skip,
execution_date=cast("DateTime", dag_run.logical_date), # type: ignore[call-arg]
map_index=context["ti"].map_index,
)
self.log.info("Done.")
# returns the result of the super execute method as it is instead of returning None
return condition
def _load_pickle():
import pickle
return pickle
def _load_dill():
try:
import dill
except ModuleNotFoundError:
log.error("Unable to import `dill` module. Please please make sure that it installed.")
raise
return dill
def _load_cloudpickle():
try:
import cloudpickle
except ModuleNotFoundError:
log.error(
"Unable to import `cloudpickle` module. "
"Please install it with: pip install 'apache-airflow[cloudpickle]'"
)
raise
return cloudpickle
_SERIALIZERS: dict[_SerializerTypeDef, Any] = {
"pickle": lazy_object_proxy.Proxy(_load_pickle),
"dill": lazy_object_proxy.Proxy(_load_dill),
"cloudpickle": lazy_object_proxy.Proxy(_load_cloudpickle),
}
| ShortCircuitOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes4.py | {
"start": 185,
"end": 365
} | class ____:
bar: str = "hi"
def __init__(self, val: str) -> None:
self.str = val
@classmethod
def method1(cls, val: str) -> None:
cls.str = val
| ClassA |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/hooks/databricks.py | {
"start": 8856,
"end": 29754
} | class ____(BaseDatabricksHook):
"""
Interact with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
"""
hook_name = "Databricks"
def __init__(
self,
databricks_conn_id: str = BaseDatabricksHook.default_conn_name,
timeout_seconds: int = 180,
retry_limit: int = 3,
retry_delay: float = 1.0,
retry_args: dict[Any, Any] | None = None,
caller: str = "DatabricksHook",
) -> None:
super().__init__(databricks_conn_id, timeout_seconds, retry_limit, retry_delay, retry_args, caller)
def create_job(self, json: dict) -> int:
"""
Call the ``api/2.1/jobs/create`` endpoint.
:param json: The data used in the body of the request to the ``create`` endpoint.
:return: the job_id as an int
"""
response = self._do_api_call(CREATE_ENDPOINT, json)
return response["job_id"]
def reset_job(self, job_id: str, json: dict) -> None:
"""
Call the ``api/2.1/jobs/reset`` endpoint.
:param json: The data used in the new_settings of the request to the ``reset`` endpoint.
"""
access_control_list = json.get("access_control_list", None)
if access_control_list:
self.log.info(
"Updating job permission for Databricks workflow job id %s with access_control_list %s",
job_id,
access_control_list,
)
acl_json = {"access_control_list": access_control_list}
self.update_job_permission(job_id=int(job_id), json=acl_json)
self._do_api_call(RESET_ENDPOINT, {"job_id": job_id, "new_settings": json})
def update_job(self, job_id: str, json: dict) -> None:
"""
Call the ``api/2.1/jobs/update`` endpoint.
:param job_id: The id of the job to update.
:param json: The data used in the new_settings of the request to the ``update`` endpoint.
"""
self._do_api_call(UPDATE_ENDPOINT, {"job_id": job_id, "new_settings": json})
def run_now(self, json: dict) -> int:
"""
Call the ``api/2.1/jobs/run-now`` endpoint.
:param json: The data used in the body of the request to the ``run-now`` endpoint.
:return: the run_id as an int
"""
response = self._do_api_call(RUN_NOW_ENDPOINT, json)
return response["run_id"]
def submit_run(self, json: dict) -> int:
"""
Call the ``api/2.1/jobs/runs/submit`` endpoint.
:param json: The data used in the body of the request to the ``submit`` endpoint.
:return: the run_id as an int
"""
response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
return response["run_id"]
    def list_jobs(
        self,
        limit: int = 25,
        expand_tasks: bool = False,
        job_name: str | None = None,
        page_token: str | None = None,
        include_user_names: bool = False,
    ) -> list[dict[str, Any]]:
        """
        List the jobs in the Databricks Job Service.

        :param limit: The limit/batch size used to retrieve jobs.
        :param expand_tasks: Whether to include task and cluster details in the response.
        :param job_name: Optional name of a job to search.
        :param page_token: The optional page token pointing at the first job to return.
        :param include_user_names: Passed through to the API as ``include_user_names``.
        :return: A list of jobs.
        """
        has_more = True
        all_jobs = []
        if page_token is None:
            page_token = ""
        while has_more:
            payload: dict[str, Any] = {
                "limit": limit,
                "expand_tasks": expand_tasks,
                "include_user_names": include_user_names,
            }
            payload["page_token"] = page_token
            if job_name:
                payload["name"] = job_name
            response = self._do_api_call(LIST_JOBS_ENDPOINT, payload)
            jobs = response.get("jobs", [])
            if job_name:
                # Defensively keep only exact name matches from the server-side name filter.
                all_jobs += [j for j in jobs if j["settings"]["name"] == job_name]
            else:
                all_jobs += jobs
            # Token-based pagination: keep requesting pages while the server reports more.
            has_more = response.get("has_more", False)
            if has_more:
                page_token = response.get("next_page_token", "")
        return all_jobs
def find_job_id_by_name(self, job_name: str) -> int | None:
"""
Find job id by its name; if there are multiple jobs with the same name, raise AirflowException.
:param job_name: The name of the job to look up.
:return: The job_id as an int or None if no job was found.
"""
matching_jobs = self.list_jobs(job_name=job_name)
if len(matching_jobs) > 1:
raise AirflowException(
f"There are more than one job with name {job_name}. Please delete duplicated jobs first"
)
if not matching_jobs:
return None
return matching_jobs[0]["job_id"]
def list_pipelines(
self, batch_size: int = 25, pipeline_name: str | None = None, notebook_path: str | None = None
) -> list[dict[str, Any]]:
"""
List the pipelines in Databricks Delta Live Tables.
:param batch_size: The limit/batch size used to retrieve pipelines.
:param pipeline_name: Optional name of a pipeline to search. Cannot be combined with path.
:param notebook_path: Optional notebook of a pipeline to search. Cannot be combined with name.
:return: A list of pipelines.
"""
has_more = True
next_token = None
all_pipelines = []
filter = None
if pipeline_name and notebook_path:
raise AirflowException("Cannot combine pipeline_name and notebook_path in one request")
if notebook_path:
filter = f"notebook='{notebook_path}'"
elif pipeline_name:
filter = f"name LIKE '{pipeline_name}'"
payload: dict[str, Any] = {
"max_results": batch_size,
}
if filter:
payload["filter"] = filter
while has_more:
if next_token is not None:
payload = {**payload, "page_token": next_token}
response = self._do_api_call(LIST_PIPELINES_ENDPOINT, payload)
pipelines = response.get("statuses", [])
all_pipelines += pipelines
if "next_page_token" in response:
next_token = response["next_page_token"]
else:
has_more = False
return all_pipelines
def find_pipeline_id_by_name(self, pipeline_name: str) -> str | None:
"""
Find pipeline id by its name; if multiple pipelines with the same name, raise AirflowException.
:param pipeline_name: The name of the pipeline to look up.
:return: The pipeline_id as a GUID string or None if no pipeline was found.
"""
matching_pipelines = self.list_pipelines(pipeline_name=pipeline_name)
if len(matching_pipelines) > 1:
raise AirflowException(
f"There are more than one pipelines with name {pipeline_name}. "
"Please delete duplicated pipelines first"
)
if not pipeline_name or len(matching_pipelines) == 0:
return None
return matching_pipelines[0]["pipeline_id"]
def get_run_page_url(self, run_id: int) -> str:
"""
Retrieve run_page_url.
:param run_id: id of the run
:return: URL of the run page
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response["run_page_url"]
async def a_get_run_page_url(self, run_id: int) -> str:
"""
Async version of `get_run_page_url()`.
:param run_id: id of the run
:return: URL of the run page
"""
json = {"run_id": run_id}
response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
return response["run_page_url"]
def get_job_id(self, run_id: int) -> int:
"""
Retrieve job_id from run_id.
:param run_id: id of the run
:return: Job id for given Databricks run
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response["job_id"]
def get_run_state(self, run_id: int) -> RunState:
"""
Retrieve run state of the run.
Please note that any Airflow tasks that call the ``get_run_state`` method will result in
failure unless you have enabled xcom pickling. This can be done using the following
environment variable: ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``
If you do not want to enable xcom pickling, use the ``get_run_state_str`` method to get
a string describing state, or ``get_run_state_lifecycle``, ``get_run_state_result``, or
``get_run_state_message`` to get individual components of the run state.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
state = response["state"]
return RunState(**state)
async def a_get_run_state(self, run_id: int) -> RunState:
"""
Async version of `get_run_state()`.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
state = response["state"]
return RunState(**state)
def get_run(self, run_id: int) -> dict[str, Any]:
"""
Retrieve run information.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response
async def a_get_run(self, run_id: int) -> dict[str, Any]:
"""
Async version of `get_run`.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
return response
def get_run_state_str(self, run_id: int) -> str:
"""
Return the string representation of RunState.
:param run_id: id of the run
:return: string describing run state
"""
state = self.get_run_state(run_id)
run_state_str = (
f"State: {state.life_cycle_state}. Result: {state.result_state}. {state.state_message}"
)
return run_state_str
def get_run_state_lifecycle(self, run_id: int) -> str:
"""
Return the lifecycle state of the run.
:param run_id: id of the run
:return: string with lifecycle state
"""
return self.get_run_state(run_id).life_cycle_state
def get_run_state_result(self, run_id: int) -> str:
"""
Return the resulting state of the run.
:param run_id: id of the run
:return: string with resulting state
"""
return self.get_run_state(run_id).result_state
def get_run_state_message(self, run_id: int) -> str:
"""
Return the state message for the run.
:param run_id: id of the run
:return: string with state message
"""
return self.get_run_state(run_id).state_message
def get_run_output(self, run_id: int) -> dict:
"""
Retrieve run output of the run.
:param run_id: id of the run
:return: output of the run
"""
json = {"run_id": run_id}
run_output = self._do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, json)
return run_output
async def a_get_run_output(self, run_id: int) -> dict:
"""
Async version of `get_run_output()`.
:param run_id: id of the run
:return: output of the run
"""
json = {"run_id": run_id}
run_output = await self._a_do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, json)
return run_output
def cancel_run(self, run_id: int) -> None:
"""
Cancel the run.
:param run_id: id of the run
"""
json = {"run_id": run_id}
self._do_api_call(CANCEL_RUN_ENDPOINT, json)
def cancel_all_runs(self, job_id: int) -> None:
"""
Cancel all active runs of a job asynchronously.
:param job_id: The canonical identifier of the job to cancel all runs of
"""
json = {"job_id": job_id}
self._do_api_call(CANCEL_ALL_RUNS_ENDPOINT, json)
def delete_run(self, run_id: int) -> None:
"""
Delete a non-active run.
:param run_id: id of the run
"""
json = {"run_id": run_id}
self._do_api_call(DELETE_RUN_ENDPOINT, json)
def repair_run(self, json: dict) -> int:
"""
Re-run one or more tasks.
:param json: repair a job run.
"""
response = self._do_api_call(REPAIR_RUN_ENDPOINT, json)
return response["repair_id"]
    def get_latest_repair_id(self, run_id: int) -> int | None:
        """Get latest repair id if any exist for run_id else None."""
        json = {"run_id": run_id, "include_history": "true"}
        response = self._do_api_call(GET_RUN_ENDPOINT, json)
        repair_history = response["repair_history"]
        if len(repair_history) == 1:
            # NOTE(review): presumably the first history entry is the original run
            # attempt itself, so a single entry means "no repairs yet" - confirm
            # against the jobs/runs/get API.
            return None
        # The most recent repair is the last entry in the history.
        return repair_history[-1]["id"]
def get_cluster_state(self, cluster_id: str) -> ClusterState:
"""
Retrieve run state of the cluster.
:param cluster_id: id of the cluster
:return: state of the cluster
"""
json = {"cluster_id": cluster_id}
response = self._do_api_call(GET_CLUSTER_ENDPOINT, json)
state = response["state"]
state_message = response["state_message"]
return ClusterState(state, state_message)
async def a_get_cluster_state(self, cluster_id: str) -> ClusterState:
"""
Async version of `get_cluster_state`.
:param cluster_id: id of the cluster
:return: state of the cluster
"""
json = {"cluster_id": cluster_id}
response = await self._a_do_api_call(GET_CLUSTER_ENDPOINT, json)
state = response["state"]
state_message = response["state_message"]
return ClusterState(state, state_message)
def restart_cluster(self, json: dict) -> None:
"""
Restarts the cluster.
:param json: json dictionary containing cluster specification.
"""
self._do_api_call(RESTART_CLUSTER_ENDPOINT, json)
def start_cluster(self, json: dict) -> None:
"""
Start the cluster.
:param json: json dictionary containing cluster specification.
"""
self._do_api_call(START_CLUSTER_ENDPOINT, json)
def terminate_cluster(self, json: dict) -> None:
"""
Terminate the cluster.
:param json: json dictionary containing cluster specification.
"""
self._do_api_call(TERMINATE_CLUSTER_ENDPOINT, json)
def install(self, json: dict) -> None:
"""
Install libraries on the cluster.
Utility function to call the ``2.0/libraries/install`` endpoint.
:param json: json dictionary containing cluster_id and an array of library
"""
self._do_api_call(INSTALL_LIBS_ENDPOINT, json)
def uninstall(self, json: dict) -> None:
"""
Uninstall libraries on the cluster.
Utility function to call the ``2.0/libraries/uninstall`` endpoint.
:param json: json dictionary containing cluster_id and an array of library
"""
self._do_api_call(UNINSTALL_LIBS_ENDPOINT, json)
def update_repo(self, repo_id: str, json: dict[str, Any]) -> dict:
"""
Update given Databricks Repos.
:param repo_id: ID of Databricks Repos
:param json: payload
:return: metadata from update
"""
method, base_path = UPDATE_REPO_ENDPOINT
repos_endpoint = (method, f"{base_path}/{repo_id}")
return self._do_api_call(repos_endpoint, json)
def delete_repo(self, repo_id: str):
"""
Delete given Databricks Repos.
:param repo_id: ID of Databricks Repos
:return:
"""
method, base_path = DELETE_REPO_ENDPOINT
repos_endpoint = (method, f"{base_path}/{repo_id}")
self._do_api_call(repos_endpoint)
def create_repo(self, json: dict[str, Any]) -> dict:
"""
Create a Databricks Repos.
:param json: payload
:return:
"""
return self._do_api_call(CREATE_REPO_ENDPOINT, json)
    def get_repo_by_path(self, path: str) -> str | None:
        """
        Obtain Repos ID by path.

        :param path: path to a repository
        :return: Repos ID if it exists, None if doesn't.
        """
        try:
            # wrap_http_errors=False lets a 404 surface as HTTPError so it can be
            # translated to ``None`` ("not found") instead of an Airflow error.
            result = self._do_api_call(WORKSPACE_GET_STATUS_ENDPOINT, {"path": path}, wrap_http_errors=False)
            if result.get("object_type", "") == "REPO":
                return str(result["object_id"])
        except requests_exceptions.HTTPError as e:
            if e.response.status_code != 404:
                raise e
        # Path does not exist, or exists but is not a repo.
        return None
def update_job_permission(self, job_id: int, json: dict[str, Any]) -> dict:
"""
Update databricks job permission.
:param job_id: job id
:param json: payload
:return: json containing permission specification
"""
return self._do_api_call(("PATCH", f"2.0/permissions/jobs/{job_id}"), json)
def post_sql_statement(self, json: dict[str, Any]) -> str:
"""
Submit a SQL statement to the Databricks SQL Statements endpoint.
:param json: The data used in the body of the request to the SQL Statements endpoint.
:return: The statement_id as a string.
"""
response = self._do_api_call(("POST", f"{SQL_STATEMENTS_ENDPOINT}"), json)
return response["statement_id"]
def get_sql_statement_state(self, statement_id: str) -> SQLStatementState:
"""
Retrieve run state of the SQL statement.
:param statement_id: ID of the SQL statement.
:return: state of the SQL statement.
"""
get_statement_endpoint = ("GET", f"{SQL_STATEMENTS_ENDPOINT}/{statement_id}")
response = self._do_api_call(get_statement_endpoint)
state = response["status"]["state"]
error_code = response["status"].get("error", {}).get("error_code", "")
error_message = response["status"].get("error", {}).get("message", "")
return SQLStatementState(state, error_code, error_message)
async def a_get_sql_statement_state(self, statement_id: str) -> SQLStatementState:
"""
Async version of `get_sql_statement_state`.
:param statement_id: ID of the SQL statement
:return: state of the SQL statement
"""
get_sql_statement_endpoint = ("GET", f"{SQL_STATEMENTS_ENDPOINT}/{statement_id}")
response = await self._a_do_api_call(get_sql_statement_endpoint)
state = response["status"]["state"]
error_code = response["status"].get("error", {}).get("error_code", "")
error_message = response["status"].get("error", {}).get("message", "")
return SQLStatementState(state, error_code, error_message)
def cancel_sql_statement(self, statement_id: str) -> None:
"""
Cancel the SQL statement.
:param statement_id: ID of the SQL statement
"""
self.log.info("Canceling SQL statement with ID: %s", statement_id)
cancel_sql_statement_endpoint = ("POST", f"{SQL_STATEMENTS_ENDPOINT}/{statement_id}/cancel")
self._do_api_call(cancel_sql_statement_endpoint)
    def test_connection(self) -> tuple[bool, str]:
        """Test the Databricks connectivity from UI."""
        hook = DatabricksHook(databricks_conn_id=self.databricks_conn_id)
        try:
            # A successful authenticated call to the spark-versions endpoint is
            # treated as proof of connectivity; the returned payload is discarded.
            hook._do_api_call(endpoint_info=SPARK_VERSIONS_ENDPOINT).get("versions")
            status = True
            message = "Connection successfully tested"
        except Exception as e:
            # Broad catch is deliberate: any failure mode should be reported to the
            # UI as a message rather than raised.
            status = False
            message = str(e)
        return status, message
| DatabricksHook |
python | apache__airflow | devel-common/src/sphinx_exts/docs_build/docs_builder.py | {
"start": 1427,
"end": 14233
} | class ____:
"""Documentation builder for Airflow."""
def __init__(self, package_name: str) -> None:
self.package_name = package_name
self.is_provider = False
self.is_airflow = False
self.is_chart = False
self.is_docker_stack = False
self.is_task_sdk = False
self.is_providers_summary = False
self.is_autobuild = False
if self.package_name.startswith("apache-airflow-providers-"):
self.package_id = self.package_name.split("apache-airflow-providers-", 1)[1].replace("-", ".")
self.provider_path = (AIRFLOW_CONTENT_ROOT_PATH / "providers").joinpath(
*self.package_id.split(".")
)
self.is_provider = True
if self.package_name == "apache-airflow":
self.is_airflow = True
if self.package_name == "helm-chart":
self.is_chart = True
if self.package_name == "task-sdk":
self.is_task_sdk = True
if self.package_name == "docker-stack":
self.is_docker_stack = True
if self.package_name == "apache-airflow-providers":
self.is_providers_summary = True
if self.package_name == "apache-airflow-ctl":
self.is_airflow_ctl = True
@property
def _doctree_dir(self) -> Path:
return GENERATED_PATH / "_doctrees" / "docs" / self.package_name
@property
def is_versioned(self):
"""Is current documentation package versioned?"""
# Disable versioning. This documentation does not apply to any released product and we can update
# it as needed, i.e. with each new package of providers.
return self.package_name not in ("apache-airflow-providers", "docker-stack")
@property
def _build_dir(self) -> Path:
if self.is_versioned:
version = "stable"
return GENERATED_PATH / "_build" / "docs" / self.package_name / version
return GENERATED_PATH / "_build" / "docs" / self.package_name
@property
def log_spelling_filename(self) -> Path:
"""Log from spelling job."""
return self._build_dir / f"output-spelling-{self.package_name}.log"
@property
def log_spelling_output_dir(self) -> Path:
"""Results from spelling job."""
return self._build_dir / f"output-spelling-results-{self.package_name}"
@property
def log_build_filename(self) -> Path:
"""Log from build job."""
return self._build_dir / f"output-build-{self.package_name}.log"
@property
def log_build_warning_filename(self) -> Path:
"""Warnings from build job."""
return self._build_dir / f"warning-build-{self.package_name}.log"
@property
def _src_dir(self) -> Path:
if self.package_name == "helm-chart":
return AIRFLOW_CONTENT_ROOT_PATH / "chart" / "docs"
if self.package_name == "apache-airflow":
return AIRFLOW_CONTENT_ROOT_PATH / "airflow-core" / "docs"
if self.package_name == "docker-stack":
return AIRFLOW_CONTENT_ROOT_PATH / "docker-stack-docs"
if self.package_name == "apache-airflow-providers":
return AIRFLOW_CONTENT_ROOT_PATH / "providers-summary-docs"
if self.package_name.startswith("apache-airflow-providers-"):
package_paths = self.package_name[len("apache-airflow-providers-") :].split("-")
return (AIRFLOW_CONTENT_ROOT_PATH / "providers").joinpath(*package_paths) / "docs"
if self.package_name == "apache-airflow-ctl":
return AIRFLOW_CONTENT_ROOT_PATH / "airflow-ctl" / "docs"
if self.package_name == "task-sdk":
return AIRFLOW_CONTENT_ROOT_PATH / "task-sdk" / "docs"
console.print(f"[red]Unknown package name: {self.package_name}")
sys.exit(1)
@property
def pythonpath(self) -> list[Path]:
path = []
if (self._src_dir.parent / "tests").exists():
path.append(self._src_dir.parent.joinpath("tests").resolve())
return path
@property
def _generated_api_dir(self) -> Path:
return self._build_dir.resolve() / "_api"
@property
def _api_dir(self) -> Path:
return self._src_dir.resolve() / "_api"
def clean_files(self) -> None:
"""Cleanup all artifacts generated by previous builds."""
shutil.rmtree(self._api_dir, ignore_errors=True)
shutil.rmtree(self._build_dir, ignore_errors=True)
shutil.rmtree(self._doctree_dir, ignore_errors=True)
self._api_dir.mkdir(parents=True, exist_ok=True)
self._build_dir.mkdir(parents=True, exist_ok=True)
def check_spelling(self, verbose: bool) -> tuple[list[SpellingError], list[DocBuildError]]:
"""
Checks spelling
:param verbose: whether to show output while running
:return: list of errors
"""
spelling_errors = []
build_errors = []
os.makedirs(self._build_dir, exist_ok=True)
shutil.rmtree(self.log_spelling_output_dir, ignore_errors=True)
self.log_spelling_output_dir.mkdir(parents=True, exist_ok=True)
command = self.get_command()
build_cmd = [
command,
"-W", # turn warnings into errors
"--color", # do emit colored output
"-T", # show full traceback on exception
"-b", # builder to use
"spelling",
"-d", # path for the cached environment and doctree files
self._doctree_dir.as_posix(),
# documentation source files
self._src_dir.as_posix(),
self.log_spelling_output_dir.as_posix(),
]
if os.environ.get("CI", "") != "true" and verbose:
console.print("[yellow]Command to run:[/] ", " ".join([shlex.quote(arg) for arg in build_cmd]))
env = os.environ.copy()
env["AIRFLOW_PACKAGE_NAME"] = self.package_name
if self.pythonpath:
env["PYTHONPATH"] = ":".join([path.as_posix() for path in self.pythonpath])
if verbose:
console.print(
f"[bright_blue]{self.package_name:60}:[/] The output is hidden until an error occurs."
)
with open(self.log_spelling_filename, "w") as output:
completed_proc = run(
build_cmd,
check=False,
cwd=AIRFLOW_CONTENT_ROOT_PATH,
env=env,
stdout=output if not verbose else None,
stderr=output if not verbose else None,
timeout=PROCESS_TIMEOUT,
)
if completed_proc.returncode != 0:
spelling_errors.append(
SpellingError(
file_path=None,
line_no=None,
spelling=None,
suggestion=None,
context_line=None,
message=(
f"Sphinx spellcheck returned non-zero exit status: {completed_proc.returncode}."
),
)
)
spelling_warning_text = ""
for filepath in self.log_spelling_output_dir.rglob("*.spelling"):
with open(filepath) as spelling_file:
spelling_warning_text += spelling_file.read()
spelling_errors.extend(parse_spelling_warnings(spelling_warning_text, self._src_dir))
if os.path.isfile(self.log_spelling_filename):
with open(self.log_spelling_filename) as warning_file:
warning_text = warning_file.read()
# Remove 7-bit C1 ANSI escape sequences
warning_text = re.sub(r"\x1B[@-_][0-?]*[ -/]*[@-~]", "", warning_text)
build_errors.extend(parse_sphinx_warnings(warning_text, self._src_dir))
console.print(
f"[bright_blue]{self.package_name:60}:[/] [red]Finished spell-checking with errors[/]"
)
else:
if spelling_errors:
console.print(
f"[bright_blue]{self.package_name:60}:[/] [yellow]Finished spell-checking with warnings[/]"
)
else:
console.print(
f"[bright_blue]{self.package_name:60}:[/] [green]Finished spell-checking successfully[/]"
)
return spelling_errors, build_errors
def build_sphinx_docs(self, verbose: bool) -> list[DocBuildError]:
"""
Build Sphinx documentation.
:param verbose: whether to show output while running
:return: list of errors
"""
build_errors = []
os.makedirs(self._build_dir, exist_ok=True)
command = self.get_command()
build_cmd = [
command,
"-T", # show full traceback on exception
"--color", # do emit colored output
"-b", # builder to use
"html",
"-d", # path for the cached environment and doctree files
self._doctree_dir.as_posix(),
"-w", # write warnings (and errors) to given file
self.log_build_warning_filename.as_posix(),
# documentation source files
self._src_dir.as_posix(),
self._build_dir.as_posix(), # path to output directory
]
if os.environ.get("CI", "") != "true" and verbose:
console.print("[yellow]Command to run:[/] ", " ".join([shlex.quote(arg) for arg in build_cmd]))
env = os.environ.copy()
env["AIRFLOW_PACKAGE_NAME"] = self.package_name
if self.pythonpath:
env["PYTHONPATH"] = ":".join([path.as_posix() for path in self.pythonpath])
if verbose:
console.print(
f"[bright_blue]{self.package_name:60}:[/] Running sphinx. "
f"The output is hidden until an error occurs."
)
with open(self.log_build_filename, "w") as output:
completed_proc = run(
build_cmd,
check=False,
cwd=AIRFLOW_CONTENT_ROOT_PATH,
env=env,
stdout=output if not verbose else None,
stderr=output if not verbose else None,
timeout=PROCESS_TIMEOUT,
)
if completed_proc.returncode != 0:
build_errors.append(
DocBuildError(
file_path=None,
line_no=None,
message=f"Sphinx returned non-zero exit status: {completed_proc.returncode}.",
)
)
if self.log_build_warning_filename.is_file():
warning_text = self.log_build_warning_filename.read_text()
# Remove 7-bit C1 ANSI escape sequences
warning_text = re.sub(r"\x1B[@-_][0-?]*[ -/]*[@-~]", "", warning_text)
build_errors.extend(parse_sphinx_warnings(warning_text, self._src_dir))
if build_errors:
console.print(
f"[bright_blue]{self.package_name:60}:[/] [red]Finished docs building with errors[/]"
)
else:
console.print(
f"[bright_blue]{self.package_name:60}:[/] [green]Finished docs building successfully[/]"
)
return build_errors
def get_command(self) -> str:
return "sphinx-autobuild" if self.is_autobuild else "sphinx-build"
def get_available_providers_distributions(include_suspended: bool = False):
    """Get list of all available providers packages to build."""
    source = ALL_PROVIDER_YAMLS_WITH_SUSPENDED if include_suspended else ALL_PROVIDER_YAMLS
    return [provider["package-name"] for provider in source]
def get_short_form(package_name: str) -> str | None:
if package_name.startswith("apache-airflow-providers-"):
return package_name.replace("apache-airflow-providers-", "").replace("-", ".")
return None
def get_long_form(package_name: str) -> str | None:
    """Return the full distribution name for a (possibly short-form) package name, or None if unknown."""
    if package_name in get_available_packages():
        return package_name
    candidate = "apache-airflow-providers-" + package_name.replace(".", "-")
    return candidate if candidate in get_available_packages() else None
def get_available_packages(include_suspended: bool = False, short_form: bool = False) -> list[str]:
    """Get list of all available packages to build."""
    provider_names = get_available_providers_distributions(include_suspended=include_suspended)
    if short_form:
        # Convert each provider distribution name to its dotted short form.
        provider_names = [get_short_form(name) for name in provider_names]
    non_provider_packages = [
        "apache-airflow-providers",
        "apache-airflow-ctl",
        "task-sdk",
        "helm-chart",
        "docker-stack",
    ]
    return ["apache-airflow", *provider_names, *non_provider_packages]
| AirflowDocsBuilder |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/types.py | {
"start": 2002,
"end": 2931
class ____(
    NamedTuple(
        "_DaemonStatus",
        [
            ("daemon_type", str),
            ("required", bool),
            ("healthy", Optional[bool]),
            ("last_heartbeat", Optional[DaemonHeartbeat]),
        ],
    )
):
    """Daemon statuses are derived from daemon heartbeats and instance configuration to provide an
    overview about the daemon's liveness.
    """

    def __new__(
        cls,
        daemon_type: str,
        required: bool,
        healthy: Optional[bool],
        last_heartbeat: Optional[DaemonHeartbeat],
    ):
        # Validation happens in __new__ (not __init__) because NamedTuple instances
        # are immutable once constructed.
        # ``healthy`` is tri-state: True/False once determined, None when unknown.
        return super().__new__(
            cls,
            daemon_type=check.str_param(daemon_type, "daemon_type"),
            required=check.bool_param(required, "required"),
            healthy=check.opt_bool_param(healthy, "healthy"),
            last_heartbeat=check.opt_inst_param(last_heartbeat, "last_heartbeat", DaemonHeartbeat),
        )
| DaemonStatus |
python | huggingface__transformers | src/transformers/models/doge/modular_doge.py | {
"start": 30270,
"end": 34129
class ____(MixtralForCausalLM):
    """Doge causal-LM head: swaps in DogeModel while reusing the Mixtral MoE causal-LM machinery."""

    def __init__(self, config):
        super().__init__(config)
        self.model = DogeModel(config)
        # Number of routed experts, used when computing the auxiliary load-balancing loss.
        self.num_experts = config.num_experts

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        output_router_logits: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, DogeForCausalLM

        >>> model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-320M")
        >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-320M")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
        aux_loss = None
        if output_router_logits:
            # NOTE(review): the args after ``num_experts`` are ``floor(sqrt(num_experts))``
            # and ``self.num_experts_per_tok`` (presumably set by the parent class) -
            # verify they line up with load_balancing_loss_func's parameter order.
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                math.floor(math.sqrt(self.num_experts)),
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device
        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )
| DogeForCausalLM |
python | gevent__gevent | src/gevent/_socketcommon.py | {
"start": 14114,
"end": 26066
} | class ____(object):
# pylint:disable=too-many-public-methods
__slots__ = (
'hub',
'timeout',
'_read_event',
'_write_event',
'_sock',
'__weakref__',
)
def __init__(self):
# Writing:
# (self.a, self.b) = (None,) * 2
# generates the fastest bytecode. But At least on PyPy,
# where the SSLSocket subclass has a timeout property,
# it results in the settimeout() method getting the tuple
# as the value, not the unpacked None.
self._read_event = None
self._write_event = None
self._sock = None
self.hub = None
self.timeout = None
def _drop_events_and_close(self, closefd=True, _cancel_wait_ex=cancel_wait_ex):
hub = self.hub
read_event = self._read_event
write_event = self._write_event
self._read_event = self._write_event = None
hub.cancel_waits_close_and_then(
(read_event, write_event),
_cancel_wait_ex,
# Pass the socket to keep it alive until such time as
# the waiters are guaranteed to be closed.
self._drop_ref_on_close if closefd else id,
self._sock
)
def _drop_ref_on_close(self, sock):
raise NotImplementedError
def _get_ref(self):
return self._read_event.ref or self._write_event.ref
def _set_ref(self, value):
self._read_event.ref = value
self._write_event.ref = value
ref = property(_get_ref, _set_ref)
_wait = _wait_on_socket
###
# Common methods defined here need to be added to the
# API documentation specifically.
###
    def settimeout(self, howlong):
        # Coerce via __float__ so any float-convertible value is accepted;
        # a non-convertible value is a TypeError, a negative one a ValueError.
        if howlong is not None:
            try:
                f = howlong.__float__
            except AttributeError:
                raise TypeError('a float is required', howlong, type(howlong))
            howlong = f()
            if howlong < 0.0:
                raise ValueError('Timeout value out of range')
        # avoid recursion with any property on self.timeout
        SocketMixin.timeout.__set__(self, howlong)
def gettimeout(self):
# avoid recursion with any property on self.timeout
return SocketMixin.timeout.__get__(self, type(self))
def setblocking(self, flag):
# Beginning in 3.6.0b3 this is supposed to raise
# if the file descriptor is closed, but the test for it
# involves closing the fileno directly. Since we
# don't touch the fileno here, it doesn't make sense for
# us.
if flag:
self.timeout = None
else:
self.timeout = 0.0
    def shutdown(self, how):
        # Wake any greenlets blocked waiting on the side(s) being shut down
        # (they get cancel_wait_ex) before delegating to the real socket.
        if how == 0: # SHUT_RD
            self.hub.cancel_wait(self._read_event, cancel_wait_ex)
        elif how == 1: # SHUT_WR
            self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        else:
            # SHUT_RDWR (or any other value): cancel both directions.
            self.hub.cancel_wait(self._read_event, cancel_wait_ex)
            self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        self._sock.shutdown(how)
# pylint:disable-next=undefined-variable
family = property(lambda self: _intenum_converter(self._sock.family, AddressFamily))
# pylint:disable-next=undefined-variable
type = property(lambda self: _intenum_converter(self._sock.type, SocketKind))
proto = property(lambda self: self._sock.proto)
def fileno(self):
return self._sock.fileno()
def getsockname(self):
return self._sock.getsockname()
def getpeername(self):
return self._sock.getpeername()
def bind(self, address):
return self._sock.bind(address)
def listen(self, *args):
return self._sock.listen(*args)
def getsockopt(self, *args):
return self._sock.getsockopt(*args)
def setsockopt(self, *args):
return self._sock.setsockopt(*args)
if hasattr(__socket__.socket, 'ioctl'): # os.name == 'nt'
def ioctl(self, *args):
return self._sock.ioctl(*args)
if hasattr(__socket__.socket, 'sleeptaskw'): # os.name == 'riscos
def sleeptaskw(self, *args):
return self._sock.sleeptaskw(*args)
def getblocking(self):
"""
Returns whether the socket will approximate blocking
behaviour.
.. versionadded:: 1.3a2
Added in Python 3.7.
"""
return self.timeout != 0.0
def connect(self, address):
"""
Connect to *address*.
.. versionchanged:: 20.6.0
If the host part of the address includes an IPv6 scope ID,
it will be used instead of ignored, if the platform supplies
:func:`socket.inet_pton`.
"""
# In the standard library, ``connect`` and ``connect_ex`` are implemented
# in C, and they both call a C function ``internal_connect`` to do the real
# work. This means that it is a visible behaviour difference to have our
# Python implementation of ``connect_ex`` simply call ``connect``:
# it could be overridden in a subclass or at runtime! Because of our exception handling,
# this can make a difference for known subclasses like SSLSocket.
self._internal_connect(address)
def connect_ex(self, address):
"""
Connect to *address*, returning a result code.
.. versionchanged:: 23.7.0
No longer uses an overridden ``connect`` method on
this object. Instead, like the standard library, this method always
uses a non-replacable internal connection function.
"""
try:
return self._internal_connect(address) or 0
except __socket__.timeout:
return EAGAIN
except __socket__.gaierror: # pylint:disable=try-except-raise
# gaierror/overflowerror/typerror is not silenced by connect_ex;
# gaierror extends error so catch it first
raise
except _SocketError as ex:
# Python 3: error is now OSError and it has various subclasses.
# Only those that apply to actually connecting are silenced by
# connect_ex.
# On Python 3, we want to check ex.errno; on Python 2
# there is no such attribute, we need to look at the first
# argument.
try:
err = ex.errno
except AttributeError:
err = ex.args[0]
if err:
return err
raise
def _internal_connect(self, address):
# Like the C function ``internal_connect``, not meant to be overridden,
# but exposed for testing.
if self.timeout == 0.0:
return self._sock.connect(address)
address = _resolve_addr(self._sock, address)
with Timeout._start_new_or_dummy(self.timeout, __socket__.timeout("timed out")):
while 1:
err = self.getsockopt(__socket__.SOL_SOCKET, __socket__.SO_ERROR)
if err:
raise _SocketError(err, strerror(err))
result = self._sock.connect_ex(address)
if not result or result == EISCONN:
break
if (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
self._wait(self._write_event)
else:
if (isinstance(address, tuple)
and address[0] == 'fe80::1'
and result == EHOSTUNREACH):
# On Python 3.7 on mac, we see EHOSTUNREACH
# returned for this link-local address, but it really is
# supposed to be ECONNREFUSED according to the standard library
# tests (test_socket.NetworkConnectionNoServer.test_create_connection)
# (On previous versions, that code passed the '127.0.0.1' IPv4 address, so
# ipv6 link locals were never a factor; 3.7 passes 'localhost'.)
# It is something of a mystery how the stdlib socket code doesn't
# produce EHOSTUNREACH---I (JAM) can't see how socketmodule.c would avoid
# that. The normal connect just calls connect_ex much like we do.
result = ECONNREFUSED
raise _SocketError(result, strerror(result))
def recv(self, *args):
while 1:
try:
return self._sock.recv(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
# QQQ without clearing exc_info test__refcount.test_clean_exit fails
exc_clear() # Python 2
self._wait(self._read_event)
def recvfrom(self, *args):
while 1:
try:
return self._sock.recvfrom(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear() # Python 2
self._wait(self._read_event)
def recvfrom_into(self, *args):
while 1:
try:
return self._sock.recvfrom_into(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear() # Python 2
self._wait(self._read_event)
def recv_into(self, *args):
while 1:
try:
return self._sock.recv_into(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear() # Python 2
self._wait(self._read_event)
def sendall(self, data, flags=0):
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
# so it should not call self._sock methods directly
data_memory = _get_memory(data)
return _sendall(self, data_memory, flags)
def sendto(self, *args):
try:
return self._sock.sendto(*args)
except _SocketError as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
exc_clear()
self._wait(self._write_event)
try:
return self._sock.sendto(*args)
except _SocketError as ex2:
if ex2.args[0] == EWOULDBLOCK:
exc_clear()
return 0
raise
def send(self, data, flags=0, timeout=timeout_default):
if timeout is timeout_default:
timeout = self.timeout
try:
return self._sock.send(data, flags)
except _SocketError as ex:
if ex.args[0] not in GSENDAGAIN or timeout == 0.0:
raise
exc_clear()
self._wait(self._write_event)
try:
return self._sock.send(data, flags)
except _SocketError as ex2:
if ex2.args[0] == EWOULDBLOCK:
exc_clear()
return 0
raise
@classmethod
def _fixup_docstrings(cls):
for k, v in vars(cls).items():
if k.startswith('_'):
continue
if not hasattr(v, '__doc__') or v.__doc__:
continue
smeth = getattr(__socket__.socket, k, None)
if not smeth or not smeth.__doc__:
continue
try:
v.__doc__ = smeth.__doc__
except (AttributeError, TypeError):
# slots can't have docs. Py2 raises TypeError,
# Py3 raises AttributeError
continue
SocketMixin._fixup_docstrings()
del SocketMixin._fixup_docstrings
| SocketMixin |
python | tensorflow__tensorflow | tensorflow/compiler/tests/fake_quant_ops_test.py | {
"start": 17388,
"end": 21207
} | class ____(xla_test.XLATestCase):
"""Test cases for FakeQuantWithMinMaxVarsPerChannel operation."""
# 8 bits, wide range.
def testOp_with8Bits(self):
self._TestOp(
[0.0, 0.5, -128.0, -0.1],
[255.0, 128.0, -0.5, 127.4],
8,
False,
[0.0, 0.0, -127.5, 0.0],
[255.0, 127.5, 0.0, 127.5],
[1.0, 0.5, 0.5, 0.5])
# 8 bits, narrow range.
def testOp_with8BitsNarrowRange(self):
self._TestOp(
[0.0, 0.1, -127.1, -0.1],
[254.0, 127.1, -0.1, 126.9],
8,
True,
[0.0, 0.0, -127.0, 0.0],
[254.0, 127.0, 0.0, 127.0],
[1.0, 0.5, 0.5, 0.5])
# 7 bits, wide range.
def testOp_with7Bits(self):
self._TestOp(
[0.0, 0.5, -64.0, -0.1],
[127.0, 64.0, -0.5, 63.4],
7,
False,
[0.0, 0.0, -63.5, 0.0],
[127.0, 63.5, 0.0, 63.5],
[1.0, 0.5, 0.5, 0.5])
# 7 bits, narrow range.
def testOp_with7BitsNarrowRange(self):
self._TestOp(
[0.0, 0.1, -63.1, -0.1],
[126.0, 63.1, -0.1, 62.9],
7,
True,
[0.0, 0.0, -63.0, 0.0],
[126.0, 63.0, 0.0, 63.0],
[1.0, 0.5, 0.5, 0.5])
def _TestOp(self, input_mins, input_maxs, num_bits, narrow_range,
expected_nudged_input_mins, expected_nudged_input_maxs,
expected_steps):
num_channels = len(input_mins)
inputs_list = []
expected_list = []
for i in range(num_channels):
expected_nudged_input_min = expected_nudged_input_mins[i]
expected_nudged_input_max = expected_nudged_input_maxs[i]
expected_step = expected_steps[i]
inputs_list.append(
[
expected_nudged_input_min - expected_step,
expected_nudged_input_min - 0.01, expected_nudged_input_min,
expected_nudged_input_min + 0.01,
expected_nudged_input_min + expected_step - 0.01,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step + 0.01,
expected_nudged_input_max - 0.01, expected_nudged_input_max,
expected_nudged_input_max + 0.01,
expected_nudged_input_max + expected_step
])
expected_list.append(
[
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min, expected_nudged_input_min,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_min + expected_step,
expected_nudged_input_max, expected_nudged_input_max,
expected_nudged_input_max, expected_nudged_input_max
])
inputs = np.transpose(np.array(inputs_list, dtype=np.float32))
expected = np.transpose(np.array(expected_list, dtype=np.float32))
with self.session() as session:
with self.test_scope():
input_placeholder = array_ops.placeholder(
dtypes.float32, inputs.shape, name="inputs")
min_placeholder = array_ops.placeholder(
dtypes.float32, (num_channels), name="min")
max_placeholder = array_ops.placeholder(
dtypes.float32, (num_channels), name="max")
outputs = array_ops.fake_quant_with_min_max_vars_per_channel(
input_placeholder,
min_placeholder,
max_placeholder,
num_bits=num_bits,
narrow_range=narrow_range)
result = session.run(
outputs, {
input_placeholder: inputs,
min_placeholder: input_mins,
max_placeholder: input_maxs
})
self.assertAllCloseAccordingToType(
result, expected, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
| FakeQuantWithMinMaxVarsPerChannelTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox14.py | {
"start": 315,
"end": 868
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox14.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"border": {"none": True}})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 246694,
"end": 249853
} | class ____(Operation):
def __init__(self, indices_or_sections, axis=0, *, name=None):
super().__init__(name=name)
self.indices_or_sections = indices_or_sections
self.axis = axis
def call(self, x):
return backend.numpy.array_split(
x,
indices_or_sections=self.indices_or_sections,
axis=self.axis,
)
def compute_output_spec(self, x):
num_splits = self.indices_or_sections
axis = self.axis
if axis < 0:
axis += len(x.shape)
total_size = x.shape[axis]
if total_size is None:
output_specs = []
base_shape = list(x.shape)
base_shape[axis] = None
for _ in range(num_splits):
output_specs.append(
KerasTensor(shape=tuple(base_shape), dtype=x.dtype)
)
return tuple(output_specs)
split_size = total_size // num_splits
remainder = total_size % num_splits
output_specs = []
base_shape = list(x.shape)
for i in range(num_splits):
size = split_size + (1 if i < remainder else 0)
shape = base_shape.copy()
shape[axis] = size
output_specs.append(KerasTensor(shape=tuple(shape), dtype=x.dtype))
return list(output_specs)
@keras_export(["keras.ops.array_split", "keras.ops.numpy.array_split"])
def array_split(x, indices_or_sections, axis=0):
"""Splits an array into multiple sub-arrays (unevenly).
This is similar to `keras.ops.split`, but it allows for
unequal splits. `indices_or_sections` must be an integer
that indicates the total number of sub-arrays to create.
If the tensor cannot be divided evenly, the first `remainder`
splits will have size `quotient + 1`, and the rest will
have size `quotient`.
Args:
x: Input tensor.
indices_or_sections: An integer indicating the number of
sub-arrays to create.
axis: The axis along which to split. Defaults to 0.
Returns:
A list of sub-tensors.
Example:
>>> x = keras.ops.arange(10)
>>> keras.ops.array_split(x, 3)
(array([0, 1, 2, 3], dtype=int32),
array([4, 5, 6], dtype=int32),
array([7, 8, 9], dtype=int32))
"""
if not isinstance(indices_or_sections, int):
raise TypeError(
"Argument `indices_or_sections` must be of type `int`. "
f"Received: indices_or_sections={indices_or_sections}"
)
if indices_or_sections <= 0:
raise ValueError(
"Argument `indices_or_sections` must be a positive integer. "
f"Received: indices_or_sections={indices_or_sections}"
)
if not isinstance(axis, int):
raise TypeError(
f"Argument `axis` must be of type `int`. Received: {axis}"
)
if any_symbolic_tensors((x,)):
return ArraySplit(
indices_or_sections=indices_or_sections, axis=axis
).symbolic_call(x)
return backend.numpy.array_split(
x, indices_or_sections=indices_or_sections, axis=axis
)
| ArraySplit |
python | tensorflow__tensorflow | tensorflow/python/saved_model/fingerprinting_test.py | {
"start": 1686,
"end": 8805
} | class ____(test.TestCase):
def _create_saved_model(self):
root = autotrackable.AutoTrackable()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
self.addCleanup(shutil.rmtree, save_dir)
return save_dir
def _create_model_with_function(self):
root = autotrackable.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
return root
def _create_model_with_input_signature(self):
root = autotrackable.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
return root
def _create_model_with_data(self):
root = autotrackable.AutoTrackable()
root.x = constant_op.constant(1.0, dtype=dtypes.float32)
root.f = def_function.function(
lambda x: root.x * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
return root
def _read_fingerprint(self, filename):
fingerprint_def = fingerprint_pb2.FingerprintDef()
with file_io.FileIO(filename, "rb") as f:
fingerprint_def.ParseFromString(f.read())
return fingerprint_def
def setUp(self):
super().setUp()
flags.config().saved_model_fingerprinting.reset(True)
def test_basic_module(self):
save_dir = self._create_saved_model()
files = file_io.list_directory_v2(save_dir)
self.assertLen(files, 4)
self.assertIn(constants.FINGERPRINT_FILENAME, files)
fingerprint_def = self._read_fingerprint(
file_io.join(save_dir, constants.FINGERPRINT_FILENAME))
# We cannot check this value due to non-determinism in saving.
self.assertGreater(fingerprint_def.saved_model_checksum, 0)
self.assertEqual(fingerprint_def.graph_def_program_hash,
14830488309055091319)
self.assertEqual(fingerprint_def.signature_def_hash, 12089566276354592893)
self.assertEqual(fingerprint_def.saved_object_graph_hash, 0)
# TODO(b/242348400): The checkpoint hash is non-deterministic, so we cannot
# check its value here.
self.assertGreater(fingerprint_def.checkpoint_hash, 0)
def test_model_saved_with_different_signature_options(self):
model = self._create_model_with_function()
# Save the model with signatures specified in SaveOptions.
sig_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
model,
sig_dir,
signatures=model.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32)))
# Save the model without signatures.
no_sig_dir = os.path.join(self.get_temp_dir(), "saved_model2")
save.save(model, no_sig_dir)
# Save the model with an input signature specified.
input_sig_dir = os.path.join(self.get_temp_dir(), "saved_model3")
save.save(self._create_model_with_input_signature(), input_sig_dir)
fingerprint_sig = self._read_fingerprint(
file_io.join(sig_dir, constants.FINGERPRINT_FILENAME))
fingerprint_no_sig = self._read_fingerprint(
file_io.join(no_sig_dir, constants.FINGERPRINT_FILENAME))
fingerprint_input_sig = self._read_fingerprint(
file_io.join(input_sig_dir, constants.FINGERPRINT_FILENAME))
# Check that the model saved with different options has different
# SignatureDef hashes.
self.assertNotEqual(fingerprint_sig.signature_def_hash,
fingerprint_no_sig.signature_def_hash)
# Check that the model saved with the same concrete function has the same
# regularized hashes.
self.assertEqual(fingerprint_sig.graph_def_program_hash,
fingerprint_input_sig.graph_def_program_hash)
self.assertEqual(fingerprint_sig.signature_def_hash,
fingerprint_input_sig.signature_def_hash)
def test_read_fingerprint_api(self):
save_dir = self._create_saved_model()
fingerprint = fingerprinting.read_fingerprint(save_dir)
fingerprint_def = self._read_fingerprint(
file_io.join(save_dir, constants.FINGERPRINT_FILENAME))
self.assertEqual(fingerprint, fingerprint_def)
def test_read_fingerprint_file_not_found(self):
with self.assertRaisesRegex(FileNotFoundError,
"SavedModel Fingerprint Error"):
fingerprinting.read_fingerprint("foo")
def test_write_fingerprint(self):
save_dir = os.path.join(self.get_temp_dir(), "model_and_fingerprint")
save.save_and_return_nodes(
self._create_model_with_data(), save_dir,
experimental_skip_checkpoint=True) # checkpoint data won't be loaded
fingerprint_def = fingerprinting.read_fingerprint(save_dir)
# We cannot check this value due to non-determinism in serialization.
self.assertGreater(fingerprint_def.saved_model_checksum, 0)
self.assertEqual(fingerprint_def.graph_def_program_hash,
8947653168630125217)
self.assertEqual(fingerprint_def.signature_def_hash, 15354238402988963670)
self.assertEqual(fingerprint_def.checkpoint_hash, 0)
def test_valid_singleprint(self):
save_dir = os.path.join(self.get_temp_dir(), "singleprint_model")
save.save(self._create_model_with_data(), save_dir)
fingerprint = fingerprinting.read_fingerprint(save_dir)
singleprint = fingerprint.singleprint()
# checkpoint_hash is non-deterministic and not included
self.assertRegex(singleprint,
"/".join(["8947653168630125217", # graph_def_program_hash
"15354238402988963670", # signature_def_hash
"1613952301283913051" # saved_object_graph_hash
]))
def test_invalid_singleprint(self):
fingerprint = fingerprinting.Fingerprint()
with self.assertRaisesRegex(ValueError,
"Encounted invalid fingerprint values"):
fingerprint.singleprint()
def test_valid_from_proto(self):
save_dir = os.path.join(self.get_temp_dir(), "from_proto_model")
save.save(self._create_model_with_data(), save_dir)
fingerprint_def = fingerprint_pb2.FingerprintDef().FromString(
fingerprinting_pywrap.ReadSavedModelFingerprint(save_dir))
fingerprint = fingerprinting.Fingerprint.from_proto(fingerprint_def)
self.assertEqual(fingerprint, fingerprint_def)
def test_invalid_from_proto(self):
save_dir = os.path.join(self.get_temp_dir(), "from_proto_model")
save.save(self._create_model_with_data(), save_dir)
wrong_def = saved_model_pb2.SavedModel(
saved_model_schema_version=1)
with self.assertRaisesRegex(ValueError,
"Given proto could not be deserialized as"):
fingerprinting.Fingerprint.from_proto(wrong_def)
def test_fingerprint_to_proto(self):
save_dir = os.path.join(self.get_temp_dir(), "from_proto_model")
save.save(self._create_model_with_data(), save_dir)
fingerprint = fingerprinting.read_fingerprint(save_dir)
fingerprint_def = fingerprinting_utils.to_proto(fingerprint)
self.assertEqual(fingerprint, fingerprint_def)
if __name__ == "__main__":
test.main()
| FingerprintingTest |
python | sympy__sympy | sympy/core/tests/test_operations.py | {
"start": 412,
"end": 2859
} | class ____(LatticeOp):
zero = Integer(0)
identity = Integer(1)
def test_lattice_simple():
assert join(join(2, 3), 4) == join(2, join(3, 4))
assert join(2, 3) == join(3, 2)
assert join(0, 2) == 0
assert join(1, 2) == 2
assert join(2, 2) == 2
assert join(join(2, 3), 4) == join(2, 3, 4)
assert join() == 1
assert join(4) == 4
assert join(1, 4, 2, 3, 1, 3, 2) == join(2, 3, 4)
def test_lattice_shortcircuit():
raises(SympifyError, lambda: join(object))
assert join(0, object) == 0
def test_lattice_print():
assert str(join(5, 4, 3, 2)) == 'join(2, 3, 4, 5)'
def test_lattice_make_args():
assert join.make_args(join(2, 3, 4)) == {S(2), S(3), S(4)}
assert join.make_args(0) == {0}
assert list(join.make_args(0))[0] is S.Zero
assert Add.make_args(0)[0] is S.Zero
def test_issue_14025():
a, b, c, d = symbols('a,b,c,d', commutative=False)
assert Mul(a, b, c).has(c*b) == False
assert Mul(a, b, c).has(b*c) == True
assert Mul(a, b, c, d).has(b*c*d) == True
def test_AssocOp_flatten():
a, b, c, d = symbols('a,b,c,d')
class MyAssoc(AssocOp):
identity = S.One
assert MyAssoc(a, MyAssoc(b, c)).args == \
MyAssoc(MyAssoc(a, b), c).args == \
MyAssoc(MyAssoc(a, b, c)).args == \
MyAssoc(a, b, c).args == \
(a, b, c)
u = MyAssoc(b, c)
v = MyAssoc(u, d, evaluate=False)
assert v.args == (u, d)
# like Add, any unevaluated outer call will flatten inner args
assert MyAssoc(a, v).args == (a, b, c, d)
def test_add_dispatcher():
class NewBase(Expr):
@property
def _add_handler(self):
return NewAdd
class NewAdd(NewBase, Add):
pass
add.register_handlerclass((Add, NewAdd), NewAdd)
a, b = Symbol('a'), NewBase()
# Add called as fallback
assert add(1, 2) == Add(1, 2)
assert add(a, a) == Add(a, a)
# selection by registered priority
assert add(a,b,a) == NewAdd(2*a, b)
def test_mul_dispatcher():
class NewBase(Expr):
@property
def _mul_handler(self):
return NewMul
class NewMul(NewBase, Mul):
pass
mul.register_handlerclass((Mul, NewMul), NewMul)
a, b = Symbol('a'), NewBase()
# Mul called as fallback
assert mul(1, 2) == Mul(1, 2)
assert mul(a, a) == Mul(a, a)
# selection by registered priority
assert mul(a,b,a) == NewMul(a**2, b)
| join |
python | huggingface__transformers | src/transformers/models/cvt/modeling_cvt.py | {
"start": 13970,
"end": 16827
} | class ____(nn.Module):
def __init__(self, config, stage):
super().__init__()
self.config = config
self.stage = stage
if self.config.cls_token[self.stage]:
self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1]))
self.embedding = CvtEmbeddings(
patch_size=config.patch_sizes[self.stage],
stride=config.patch_stride[self.stage],
num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
embed_dim=config.embed_dim[self.stage],
padding=config.patch_padding[self.stage],
dropout_rate=config.drop_rate[self.stage],
)
drop_path_rates = [
x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage], device="cpu")
]
self.layers = nn.Sequential(
*[
CvtLayer(
num_heads=config.num_heads[self.stage],
embed_dim=config.embed_dim[self.stage],
kernel_size=config.kernel_qkv[self.stage],
padding_q=config.padding_q[self.stage],
padding_kv=config.padding_kv[self.stage],
stride_kv=config.stride_kv[self.stage],
stride_q=config.stride_q[self.stage],
qkv_projection_method=config.qkv_projection_method[self.stage],
qkv_bias=config.qkv_bias[self.stage],
attention_drop_rate=config.attention_drop_rate[self.stage],
drop_rate=config.drop_rate[self.stage],
drop_path_rate=drop_path_rates[self.stage],
mlp_ratio=config.mlp_ratio[self.stage],
with_cls_token=config.cls_token[self.stage],
)
for _ in range(config.depth[self.stage])
]
)
def forward(self, hidden_state):
cls_token = None
hidden_state = self.embedding(hidden_state)
batch_size, num_channels, height, width = hidden_state.shape
# rearrange b c h w -> b (h w) c"
hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1)
if self.config.cls_token[self.stage]:
cls_token = self.cls_token.expand(batch_size, -1, -1)
hidden_state = torch.cat((cls_token, hidden_state), dim=1)
for layer in self.layers:
layer_outputs = layer(hidden_state, height, width)
hidden_state = layer_outputs
if self.config.cls_token[self.stage]:
cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
return hidden_state, cls_token
| CvtStage |
python | PyCQA__pylint | pylint/extensions/comparison_placement.py | {
"start": 680,
"end": 2362
} | class ____(BaseChecker):
"""Checks the placement of constants in comparisons."""
# configuration section name
name = "comparison-placement"
msgs = {
"C2201": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
{"old_names": [("C0122", "old-misplaced-comparison-constant")]},
)
}
options = ()
def _check_misplaced_constant(
self,
node: nodes.Compare,
left: nodes.NodeNG,
right: nodes.NodeNG,
operator: str,
) -> None:
if isinstance(right, nodes.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
@utils.only_required_for_messages("misplaced-comparison-constant")
def visit_compare(self, node: nodes.Compare) -> None:
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, nodes.Const):
self._check_misplaced_constant(node, left, right, operator)
def register(linter: PyLinter) -> None:
linter.register_checker(MisplacedComparisonConstantChecker(linter))
| MisplacedComparisonConstantChecker |
python | great-expectations__great_expectations | great_expectations/render/renderer/page_renderer.py | {
"start": 1183,
"end": 24105
} | class ____(Renderer):
def __init__(
self,
column_section_renderer=None,
run_info_at_end: bool = False,
data_context=None,
) -> None:
"""
Args:
column_section_renderer:
run_info_at_end: Move the run info (Info, Batch Markers, Batch Kwargs) to the end
of the rendered output rather than after Statistics.
"""
super().__init__()
if column_section_renderer is None:
column_section_renderer = {"class_name": "ValidationResultsColumnSectionRenderer"}
module_name = "great_expectations.render.renderer.column_section_renderer"
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_environment={},
config_defaults={
"module_name": column_section_renderer.get("module_name", module_name)
},
)
if not self._column_section_renderer:
raise ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=column_section_renderer["class_name"],
)
self.run_info_at_end = run_info_at_end
self._data_context = data_context
# TODO: deprecate dual batch api support in 0.14
def render(
self,
validation_results: ExpectationSuiteValidationResult,
suite_parameters=None,
):
# Gather run identifiers
run_name, run_time = self._parse_run_values(validation_results)
expectation_suite_name = validation_results.suite_name
batch_kwargs = (
validation_results.meta.get("batch_kwargs", {})
or validation_results.meta.get("batch_spec", {})
or {}
)
# Add datasource key to batch_kwargs if missing
if "datasource" not in batch_kwargs:
# Check if expectation_suite_name follows datasource.batch_kwargs_generator.data_asset_name.suite_name pattern # noqa: E501 # FIXME CoP
if len(expectation_suite_name.split(".")) == 4: # noqa: PLR2004 # FIXME CoP
batch_kwargs["datasource"] = expectation_suite_name.split(".")[0]
columns = self._group_evrs_by_column(validation_results, expectation_suite_name)
overview_content_blocks = [
self._render_validation_header(validation_results),
self._render_validation_statistics(validation_results=validation_results),
]
collapse_content_blocks = [
self._render_validation_info(validation_results=validation_results)
]
collapse_content_block = self._generate_collapse_content_block(
collapse_content_blocks, validation_results
)
if not self.run_info_at_end:
overview_content_blocks.append(collapse_content_block)
sections = self._collect_rendered_document_content_sections(
validation_results,
overview_content_blocks,
collapse_content_blocks,
columns,
)
# Determine whether we have a custom run_name
data_asset_name = batch_kwargs.get("data_asset_name", "")
page_title = self._determine_page_title(
run_name, run_time, data_asset_name, expectation_suite_name
)
return RenderedDocumentContent(
**{
"renderer_type": "ValidationResultsPageRenderer",
"page_title": page_title,
"batch_kwargs": batch_kwargs if "batch_kwargs" in validation_results.meta else None,
"batch_spec": batch_kwargs if "batch_spec" in validation_results.meta else None,
"expectation_suite_name": expectation_suite_name,
"sections": sections,
"utm_medium": "validation-results-page",
}
)
def _parse_run_values(
self, validation_results: ExpectationSuiteValidationResult
) -> Tuple[str, str]:
run_id: Union[str, dict, RunIdentifier] = validation_results.meta["run_id"]
if isinstance(run_id, str):
try:
run_time = parse(run_id).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
except (ValueError, TypeError):
run_time = "__none__"
run_name = run_id
elif isinstance(run_id, dict):
run_name = run_id.get("run_name") or "__none__"
try:
t = run_id.get("run_time", "")
run_time = parse(t).strftime("%Y-%m-%dT%H:%M:%SZ")
except (ValueError, TypeError):
run_time = "__none__"
elif isinstance(run_id, RunIdentifier):
run_name = run_id.run_name or "__none__"
run_time = run_id.run_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return run_name, run_time
def _group_evrs_by_column(
self,
validation_results: ExpectationSuiteValidationResult,
expectation_suite_name: str,
) -> Dict[str, list]:
columns = defaultdict(list)
try:
suite_meta = (
self._data_context.suites.get(expectation_suite_name).meta
if self._data_context is not None
else None
)
except Exception:
suite_meta = None
meta_properties_to_render = self._get_meta_properties_notes(suite_meta)
for evr in validation_results.results:
if meta_properties_to_render is not None:
evr.expectation_config.kwargs["meta_properties_to_render"] = (
meta_properties_to_render
)
if "column" in evr.expectation_config.kwargs:
column = evr.expectation_config.kwargs["column"]
else:
column = "Table-Level Expectations"
columns[column].append(evr)
return columns
def _generate_collapse_content_block(
self,
collapse_content_blocks: List[RenderedTableContent],
validation_results: ExpectationSuiteValidationResult,
) -> CollapseContent:
attrs = [
("batch_markers", "Batch Markers"),
("batch_kwargs", "Batch Kwargs"),
("batch_parameters", "Batch Parameters"),
("batch_spec", "Batch Spec"),
("batch_request", "Batch Definition"),
]
for attr, header in attrs:
if validation_results.meta.get(attr):
table = self._render_nested_table_from_dict(
input_dict=validation_results.meta.get(attr),
header=header,
)
collapse_content_blocks.append(table)
collapse_content_block = CollapseContent(
**{
"collapse_toggle_link": "Show more info...",
"collapse": collapse_content_blocks,
"styling": {
"body": {"classes": ["card", "card-body"]},
"classes": ["col-12", "p-1"],
},
}
)
return collapse_content_block
def _collect_rendered_document_content_sections(
self,
validation_results: ExpectationSuiteValidationResult,
overview_content_blocks: List[RenderedComponentContent],
collapse_content_blocks: List[RenderedTableContent],
columns: Dict[str, list],
) -> List[RenderedSectionContent]:
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
sections = [
RenderedSectionContent(
**{
"section_name": "Overview",
"content_blocks": overview_content_blocks,
}
)
]
if "Table-Level Expectations" in columns:
sections += [
self._column_section_renderer.render(
validation_results=columns["Table-Level Expectations"],
suite_parameters=validation_results.suite_parameters,
)
]
sections += [
self._column_section_renderer.render(
validation_results=columns[column],
suite_parameters=validation_results.suite_parameters,
)
for column in ordered_columns
]
if self.run_info_at_end:
sections += [
RenderedSectionContent(
**{
"section_name": "Run Info",
"content_blocks": collapse_content_blocks,
}
)
]
return sections
def _determine_page_title(
self,
run_name: str,
run_time: str,
data_asset_name: str,
expectation_suite_name: str,
) -> str:
try:
run_name_as_time = parse(run_name)
except ValueError:
run_name_as_time = None
try:
run_time_datetime = parse(run_time)
except ValueError:
run_time_datetime = None
include_run_name: bool = False
if (
run_name_as_time != run_time_datetime # noqa: PLR1714 # FIXME CoP
and run_name_as_time != "__none__"
):
include_run_name = True
page_title = f"Validations / {expectation_suite_name}"
if data_asset_name:
page_title += f" / {data_asset_name}"
if include_run_name:
page_title += f" / {run_name}"
page_title += f" / {run_time}"
return page_title
@classmethod
def _get_meta_properties_notes(cls, suite_meta):
"""
This method is used for fetching the custom meta to be added at the suite level
"notes": {
"content": {
"dimension": "properties.dimension",
"severity": "properties.severity"
},
"format": LegacyDiagnosticRendererType.META_PROPERTIES
}
expectation level
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {
"column": "city"
},
"meta": {
"attributes": {
"properties": {
"dimension": "completeness",
"severity": "P3"
},
"user_meta": {
"notes": ""
}
}
}
}
This will fetch dimension and severity values which are in the expectation meta.
"""
if (
suite_meta is not None
and "notes" in suite_meta
and "format" in suite_meta["notes"]
and suite_meta["notes"]["format"] == LegacyDiagnosticRendererType.META_PROPERTIES
):
return suite_meta["notes"]["content"]
else:
return None
@classmethod
def _render_validation_header(cls, validation_results):
success = validation_results.success
expectation_suite_name = validation_results.suite_name
expectation_suite_path_components = (
[".." for _ in range(len(expectation_suite_name.split(".")) + 3)]
+ ["expectations"]
+ str(expectation_suite_name).split(".")
)
expectation_suite_path = f"{os.path.join(*expectation_suite_path_components)}.html" # noqa: PTH118 # FIXME CoP
# TODO: deprecate dual batch api support in 0.14
batch_kwargs = (
validation_results.meta.get("batch_kwargs", {})
or validation_results.meta.get("batch_spec", {})
or {}
)
data_asset_name = batch_kwargs.get("data_asset_name")
if success:
success = "Succeeded"
html_success_icon = (
'<i class="fas fa-check-circle text-success" aria-hidden="true"></i>'
)
else:
success = "Failed"
html_success_icon = '<i class="fas fa-times text-danger" aria-hidden="true"></i>'
return RenderedHeaderContent(
**{
"content_block_type": "header",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Overview",
"tag": "h5",
"styling": {"classes": ["m-0"]},
},
}
),
"subheader": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "${suite_title} ${expectation_suite_name}\n ${data_asset} ${data_asset_name}\n ${status_title} ${html_success_icon} ${success}", # noqa: E501 # FIXME CoP
"params": {
"suite_title": "Expectation Suite:",
"data_asset": "Data asset:",
"data_asset_name": data_asset_name,
"status_title": "Status:",
"expectation_suite_name": expectation_suite_name,
"success": success,
"html_success_icon": html_success_icon,
},
"styling": {
"params": {
"suite_title": {"classes": ["h6"]},
"status_title": {"classes": ["h6"]},
"expectation_suite_name": {
"tag": "a",
"attributes": {"href": expectation_suite_path},
},
},
"classes": ["mb-0", "mt-1"],
},
},
}
),
"styling": {
"classes": ["col-12", "p-0"],
"header": {"classes": ["alert", "alert-secondary"]},
},
}
)
@classmethod
def _render_validation_info(cls, validation_results):
run_id = validation_results.meta["run_id"]
if isinstance(run_id, str):
try:
run_time = parse(run_id).strftime("%Y-%m-%dT%H:%M:%SZ")
except (ValueError, TypeError):
run_time = "__none__"
run_name = run_id
elif isinstance(run_id, dict):
run_name = run_id.get("run_name") or "__none__"
try:
run_time = str(parse(run_id.get("run_time")).strftime("%Y-%m-%dT%H:%M:%SZ"))
except (ValueError, TypeError):
run_time = "__none__"
elif isinstance(run_id, RunIdentifier):
run_name = run_id.run_name or "__none__"
run_time = run_id.run_time.strftime("%Y-%m-%dT%H:%M:%SZ")
# TODO: Deprecate "great_expectations.__version__"
ge_version = validation_results.meta.get(
"great_expectations_version"
) or validation_results.meta.get("great_expectations.__version__")
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Info",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": [
["Great Expectations Version", ge_version],
["Run Name", run_name],
["Run Time", run_time],
],
"styling": {
"classes": ["col-12", "table-responsive", "mt-1"],
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
},
},
}
)
@classmethod
def _render_nested_table_from_dict(cls, input_dict, header=None, sub_table=False):
table_rows = []
for kwarg, value in input_dict.items():
if not isinstance(value, (dict, OrderedDict)):
table_row = [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": str(kwarg)},
"styling": {
"default": {"styles": {"word-break": "break-all"}},
},
},
"styling": {
"parent": {
"classes": ["pr-3"],
}
},
}
),
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": str(value)},
"styling": {
"default": {"styles": {"word-break": "break-all"}},
},
},
"styling": {
"parent": {
"classes": [],
}
},
}
),
]
else:
table_row = [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$value",
"params": {"value": str(kwarg)},
"styling": {
"default": {"styles": {"word-break": "break-all"}},
},
},
"styling": {
"parent": {
"classes": ["pr-3"],
}
},
}
),
cls._render_nested_table_from_dict(value, sub_table=True),
]
table_rows.append(table_row)
table_rows.sort(key=lambda row: row[0].string_template["params"]["value"])
if sub_table:
return RenderedTableContent(
**{
"content_block_type": "table",
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive"],
"body": {"classes": ["table", "table-sm", "m-0"]},
"parent": {"classes": ["pt-0", "pl-0", "border-top-0"]},
},
}
)
else:
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": header,
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": table_rows,
"styling": {
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
}
},
}
)
@classmethod
def _render_validation_statistics(cls, validation_results):
statistics = validation_results.statistics
statistics_dict = OrderedDict(
[
("evaluated_expectations", "Evaluated Expectations"),
("successful_expectations", "Successful Expectations"),
("unsuccessful_expectations", "Unsuccessful Expectations"),
("success_percent", "Success Percent"),
]
)
table_rows = []
for key, value in statistics_dict.items():
if statistics.get(key) is not None:
if key == "success_percent":
# table_rows.append([value, "{0:.2f}%".format(statistics[key])])
table_rows.append([value, f"{num_to_str(statistics[key], precision=4)}%"])
else:
table_rows.append([value, statistics[key]])
return RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Statistics",
"tag": "h6",
"styling": {"classes": ["m-0"]},
},
}
),
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive", "mt-1", "p-1"],
"body": {
"classes": ["table", "table-sm"],
"styles": {
"margin-bottom": "0.5rem !important",
"margin-top": "0.5rem !important",
},
},
},
}
)
| ValidationResultsPageRenderer |
python | pypa__warehouse | warehouse/macaroons/caveats/_core.py | {
"start": 502,
"end": 596
} | class ____(CaveatError):
pass
@dataclass(frozen=True, slots=True)
| CaveatDeserializationError |
python | huggingface__transformers | src/transformers/models/gptj/modeling_gptj.py | {
"start": 30479,
"end": 34851
} | class ____(GPTJPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}
def __init__(self, config):
super().__init__(config)
self.transformer = GPTJModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = transformer_outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The GPT-J Model transformer with a sequence classification head on top (linear layer).
[`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT, GPT-2, GPT-Neo) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
| GPTJForCausalLM |
python | python-markdown__markdown | tests/test_legacy.py | {
"start": 1327,
"end": 2792
} | class ____(LegacyTestCase):
"""
Notes on "excluded" tests:
Quotes in attributes: attributes get output in different order
Inline HTML (Span): Backtick in raw HTML attribute TODO: fix me
Backslash escapes: Weird whitespace issue in output
`Ins` & `del`: Our behavior follows `markdown.pl`. I think PHP is wrong here
Auto Links: TODO: fix raw HTML so is doesn't match <hr@example.com> as a `<hr>`.
Empty List Item: We match `markdown.pl` here. Maybe someday we'll support this
Headers: TODO: fix headers to not require blank line before
Mixed `OL`s and `UL`s: We match `markdown.pl` here. I think PHP is wrong here
Emphasis: We have various minor differences in combined & incorrect em markup.
Maybe fix a few of them - but most aren't too important
Code block in a list item: We match `markdown.pl` - not sure how PHP gets that output??
PHP-Specific Bugs: Not sure what to make of the escaping stuff here.
Why is PHP not removing a backslash?
"""
location = os.path.join(parent_test_dir, 'php')
normalize = True
input_ext = '.text'
output_ext = '.xhtml'
exclude = [
'Quotes_in_attributes',
'Inline_HTML_(Span)',
'Backslash_escapes',
'Ins_&_del',
'Auto_Links',
'Empty_List_Item',
'Headers',
'Mixed_OLs_and_ULs',
'Emphasis',
'Code_block_in_a_list_item',
'PHP_Specific_Bugs'
]
| TestPhp |
python | tensorflow__tensorflow | tensorflow/python/summary/summary_iterator.py | {
"start": 917,
"end": 3085
} | class ____(object):
"""Yields `Event` protocol buffers from a given path."""
def __init__(self, path):
self._tf_record_iterator = tf_record.tf_record_iterator(path)
def __iter__(self):
return self
def __next__(self):
r = next(self._tf_record_iterator)
return event_pb2.Event.FromString(r)
next = __next__
@tf_export(v1=['train.summary_iterator'])
def summary_iterator(path):
# pylint: disable=line-too-long
"""Returns a iterator for reading `Event` protocol buffers from an event file.
You can use this function to read events written to an event file. It returns
a Python iterator that yields `Event` protocol buffers.
Example: Print the contents of an events file.
```python
for e in tf.compat.v1.train.summary_iterator(path to events file):
print(e)
```
Example: Print selected summary values.
```python
# This example supposes that the events file contains summaries with a
# summary value tag 'loss'. These could have been added by calling
# `add_summary()`, passing the output of a scalar summary op created with
# with: `tf.compat.v1.summary.scalar('loss', loss_tensor)`.
for e in tf.compat.v1.train.summary_iterator(path to events file):
for v in e.summary.value:
if v.tag == 'loss':
print(tf.make_ndarray(v.tensor))
```
Example: Continuously check for new summary values.
```python
summaries = tf.compat.v1.train.summary_iterator(path to events file)
while True:
for e in summaries:
for v in e.summary.value:
if v.tag == 'loss':
print(tf.make_ndarray(v.tensor))
# Wait for a bit before checking the file for any new events
time.sleep(wait time)
```
See the protocol buffer definitions of
[Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
and
[Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
for more information about their attributes.
Args:
path: The path to an event file created by a `SummaryWriter`.
Returns:
A iterator that yields `Event` protocol buffers
"""
return _SummaryIterator(path)
| _SummaryIterator |
python | sphinx-doc__sphinx | sphinx/transforms/i18n.py | {
"start": 3151,
"end": 3507
} | class ____(SphinxTransform):
"""Preserve original translatable messages before translation"""
default_priority = 10 # this MUST be invoked before Locale transform
def apply(self, **kwargs: Any) -> None:
for node in self.document.findall(addnodes.translatable):
node.preserve_original_messages()
| PreserveTranslatableMessages |
python | django__django | tests/utils_tests/test_text.py | {
"start": 372,
"end": 19777
} | class ____(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(["a", "b", "c", "d"]), "a, b, c or d")
self.assertEqual(text.get_text_list(["a", "b", "c"], "and"), "a, b and c")
self.assertEqual(text.get_text_list(["a", "b"], "and"), "a and b")
self.assertEqual(text.get_text_list(["a"]), "a")
self.assertEqual(text.get_text_list([]), "")
with override("ar"):
self.assertEqual(text.get_text_list(["a", "b", "c"]), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.', ["This", "is", '"a person"', "test."]),
('This is "a person\'s" test.', ["This", "is", '"a person\'s"', "test."]),
('This is "a person\\"s" test.', ["This", "is", '"a person\\"s"', "test."]),
("\"a 'one", ['"a', "'one"]),
("all friends' tests", ["all", "friends'", "tests"]),
(
'url search_page words="something else"',
["url", "search_page", 'words="something else"'],
),
(
"url search_page words='something else'",
["url", "search_page", "words='something else'"],
),
(
'url search_page words "something else"',
["url", "search_page", "words", '"something else"'],
),
(
'url search_page words-"something else"',
["url", "search_page", 'words-"something else"'],
),
("url search_page words=hello", ["url", "search_page", "words=hello"]),
(
'url search_page words="something else',
["url", "search_page", 'words="something', "else"],
),
("cut:','|cut:' '", ["cut:','|cut:' '"]),
(lazystr("a b c d"), ["a", "b", "c", "d"]), # Test for #20231
]
for test, expected in testdata:
with self.subTest(value=test):
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator("The quick brown fox jumped over the lazy dog.")
self.assertEqual(
"The quick brown fox jumped over the lazy dog.", truncator.chars(100)
),
self.assertEqual("The quick brown fox …", truncator.chars(21))
self.assertEqual("The quick brown fo.....", truncator.chars(23, "....."))
self.assertEqual(".....", truncator.chars(4, "....."))
nfc = text.Truncator("o\xfco\xfco\xfco\xfc")
nfd = text.Truncator("ou\u0308ou\u0308ou\u0308ou\u0308")
self.assertEqual("oüoüoüoü", nfc.chars(8))
self.assertEqual("oüoüoüoü", nfd.chars(8))
self.assertEqual("oü…", nfc.chars(3))
self.assertEqual("oü…", nfd.chars(3))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator("-B\u030aB\u030a----8")
self.assertEqual("-B\u030a…", truncator.chars(3))
self.assertEqual("-B\u030aB\u030a-…", truncator.chars(5))
self.assertEqual("-B\u030aB\u030a----8", truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator("-----")
self.assertEqual("---B\u030a", truncator.chars(4, "B\u030a"))
self.assertEqual("-----", truncator.chars(5, "B\u030a"))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual("...", text.Truncator("asdf").chars(1, truncate="..."))
# lazy strings are handled correctly
self.assertEqual(
text.Truncator(lazystr("The quick brown fox")).chars(10), "The quick…"
)
def test_truncate_chars_html(self):
truncator = text.Truncator(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em>'
"</strong></p>"
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em>'
"</strong></p>",
truncator.chars(80, html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em>'
"</strong></p>",
truncator.chars(46, html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog…</em>'
"</strong></p>",
truncator.chars(45, html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick…</em></strong></p>',
truncator.chars(10, html=True),
)
self.assertEqual(
'<p id="par"><strong><em>…</em></strong></p>',
truncator.chars(1, html=True),
)
self.assertEqual("", truncator.chars(0, html=True))
self.assertEqual("", truncator.chars(-1, html=True))
self.assertEqual(
'<p id="par"><strong><em>The qu....</em></strong></p>',
truncator.chars(10, "....", html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick </em></strong></p>',
truncator.chars(10, "", html=True),
)
truncator = text.Truncator("foo</p>")
self.assertEqual("foo</p>", truncator.chars(5, html=True))
@patch("django.utils.text.Truncator.MAX_LENGTH_HTML", 10_000)
def test_truncate_chars_html_size_limit(self):
max_len = text.Truncator.MAX_LENGTH_HTML
bigger_len = text.Truncator.MAX_LENGTH_HTML + 1
valid_html = "<p>Joel is a slug</p>" # 14 chars
perf_test_values = [
("</a" + "\t" * (max_len - 6) + "//>", "</a>"),
("</p" + "\t" * bigger_len + "//>", "</p>"),
("&" * bigger_len, ""),
("_X<<<<<<<<<<<>", "_X<<<<<<<…"),
(valid_html * bigger_len, "<p>Joel is a…</p>"), # 10 chars
]
for value, expected in perf_test_values:
with self.subTest(value=value):
truncator = text.Truncator(value)
self.assertEqual(expected, truncator.chars(10, html=True))
def test_truncate_chars_html_with_newline_inside_tag(self):
truncator = text.Truncator(
'<p>The quick <a href="xyz.html"\n id="mylink">brown fox</a> jumped over '
"the lazy dog.</p>"
)
self.assertEqual(
'<p>The quick <a href="xyz.html"\n id="mylink">brow…</a></p>',
truncator.chars(15, html=True),
)
self.assertEqual(
"<p>Th…</p>",
truncator.chars(3, html=True),
)
def test_truncate_chars_html_with_void_elements(self):
truncator = text.Truncator(
"<br/>The <hr />quick brown fox jumped over the lazy dog."
)
self.assertEqual("<br/>The <hr />quick brown…", truncator.chars(16, html=True))
truncator = text.Truncator(
"<br>The <hr/>quick <em>brown fox</em> jumped over the lazy dog."
)
self.assertEqual(
"<br>The <hr/>quick <em>brown…</em>", truncator.chars(16, html=True)
)
self.assertEqual("<br>The <hr/>q…", truncator.chars(6, html=True))
self.assertEqual("<br>The <hr/>…", truncator.chars(5, html=True))
self.assertEqual("<br>The…", truncator.chars(4, html=True))
self.assertEqual("<br>Th…", truncator.chars(3, html=True))
def test_truncate_chars_html_with_html_entities(self):
truncator = text.Truncator(
"<i>Buenos días! ¿Cómo está?</i>"
)
self.assertEqual(
"<i>Buenos días! ¿Cómo está?</i>",
truncator.chars(40, html=True),
)
self.assertEqual(
"<i>Buenos días…</i>",
truncator.chars(12, html=True),
)
self.assertEqual(
"<i>Buenos días! ¿Cómo está…</i>",
truncator.chars(24, html=True),
)
truncator = text.Truncator("<p>I <3 python, what about you?</p>")
self.assertEqual("<p>I <3 python, wh…</p>", truncator.chars(16, html=True))
def test_truncate_words(self):
truncator = text.Truncator("The quick brown fox jumped over the lazy dog.")
self.assertEqual(
"The quick brown fox jumped over the lazy dog.", truncator.words(10)
)
self.assertEqual("The quick brown fox…", truncator.words(4))
self.assertEqual("The quick brown fox[snip]", truncator.words(4, "[snip]"))
# lazy strings are handled correctly
truncator = text.Truncator(
lazystr("The quick brown fox jumped over the lazy dog.")
)
self.assertEqual("The quick brown fox…", truncator.words(4))
self.assertEqual("", truncator.words(0))
self.assertEqual("", truncator.words(-1))
def test_truncate_html_words(self):
truncator = text.Truncator(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em>'
"</strong></p>"
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em>'
"</strong></p>",
truncator.words(10, html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox…</em></strong></p>',
truncator.words(4, html=True),
)
self.assertEqual(
"",
truncator.words(0, html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox....</em></strong></p>',
truncator.words(4, "....", html=True),
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox</em></strong></p>',
truncator.words(4, "", html=True),
)
truncator = text.Truncator(
"<p>The quick \t brown fox jumped over the lazy dog.</p>"
)
self.assertEqual(
"<p>The quick brown fox…</p>",
truncator.words(4, html=True),
)
# Test with new line inside tag
truncator = text.Truncator(
'<p>The quick <a href="xyz.html"\n id="mylink">brown fox</a> jumped over '
"the lazy dog.</p>"
)
self.assertEqual(
'<p>The quick <a href="xyz.html"\n id="mylink">brown…</a></p>',
truncator.words(3, html=True),
)
self.assertEqual(
"<p>The…</p>",
truncator.words(1, html=True),
)
# Test self-closing tags
truncator = text.Truncator(
"<br/>The <hr />quick brown fox jumped over the lazy dog."
)
self.assertEqual("<br/>The <hr />quick brown…", truncator.words(3, html=True))
truncator = text.Truncator(
"<br>The <hr/>quick <em>brown fox</em> jumped over the lazy dog."
)
self.assertEqual(
"<br>The <hr/>quick <em>brown…</em>", truncator.words(3, html=True)
)
# Test html entities
truncator = text.Truncator(
"<i>Buenos días! ¿Cómo está?</i>"
)
self.assertEqual(
"<i>Buenos días! ¿Cómo…</i>",
truncator.words(3, html=True),
)
truncator = text.Truncator("<p>I <3 python, what about you?</p>")
self.assertEqual("<p>I <3 python,…</p>", truncator.words(3, html=True))
truncator = text.Truncator("foo</p>")
self.assertEqual("foo</p>", truncator.words(3, html=True))
# Only open brackets.
truncator = text.Truncator("<" * 60_000)
self.assertEqual(truncator.words(1, html=True), "<…")
# Tags with special chars in attrs.
truncator = text.Truncator(
"""<i style="margin: 5%; font: *;">Hello, my dear lady!</i>"""
)
self.assertEqual(
"""<i style="margin: 5%; font: *;">Hello, my dear…</i>""",
truncator.words(3, html=True),
)
# Tags with special non-latin chars in attrs.
truncator = text.Truncator("""<p data-x="א">Hello, my dear lady!</p>""")
self.assertEqual(
"""<p data-x="א">Hello, my dear…</p>""",
truncator.words(3, html=True),
)
# Misplaced brackets.
truncator = text.Truncator("hello >< world")
self.assertEqual(truncator.words(1, html=True), "hello…")
self.assertEqual(truncator.words(2, html=True), "hello >…")
self.assertEqual(truncator.words(3, html=True), "hello ><…")
self.assertEqual(truncator.words(4, html=True), "hello >< world")
@patch("django.utils.text.Truncator.MAX_LENGTH_HTML", 10_000)
def test_truncate_words_html_size_limit(self):
max_len = text.Truncator.MAX_LENGTH_HTML
bigger_len = text.Truncator.MAX_LENGTH_HTML + 1
valid_html = "<p>Joel is a slug</p>" # 4 words
perf_test_values = [
("</a" + "\t" * (max_len - 6) + "//>", "</a>"),
("</p" + "\t" * bigger_len + "//>", "</p>"),
("&" * max_len, ""),
("&" * bigger_len, ""),
("_X<<<<<<<<<<<>", "_X<<<<<<<<<<<>"),
(valid_html * bigger_len, valid_html * 12 + "<p>Joel is…</p>"), # 50 words
]
for value, expected in perf_test_values:
with self.subTest(value=value):
truncator = text.Truncator(value)
self.assertEqual(expected, truncator.words(50, html=True))
def test_wrap(self):
digits = "1234 67 9"
self.assertEqual(text.wrap(digits, 100), "1234 67 9")
self.assertEqual(text.wrap(digits, 9), "1234 67 9")
self.assertEqual(text.wrap(digits, 8), "1234 67\n9")
self.assertEqual(text.wrap("short\na long line", 7), "short\na long\nline")
self.assertEqual(
text.wrap("do-not-break-long-words please? ok", 8),
"do-not-break-long-words\nplease?\nok",
)
long_word = "l%sng" % ("o" * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(
text.wrap("a %s word" % long_word, 10), "a\n%s\nword" % long_word
)
self.assertEqual(text.wrap(lazystr(digits), 100), "1234 67 9")
def test_normalize_newlines(self):
self.assertEqual(
text.normalize_newlines("abc\ndef\rghi\r\n"), "abc\ndef\nghi\n"
)
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
self.assertEqual(
text.normalize_newlines(lazystr("abc\ndef\rghi\r\n")), "abc\ndef\nghi\n"
)
def test_phone2numeric(self):
numeric = text.phone2numeric("0800 flowers")
self.assertEqual(numeric, "0800 3569377")
lazy_numeric = lazystr(text.phone2numeric("0800 flowers"))
self.assertEqual(lazy_numeric, "0800 3569377")
def test_slugify(self):
items = (
# given - expected - Unicode?
("Hello, World!", "hello-world", False),
("spam & eggs", "spam-eggs", False),
(" multiple---dash and space ", "multiple-dash-and-space", False),
("\t whitespace-in-value \n", "whitespace-in-value", False),
("underscore_in-value", "underscore_in-value", False),
("__strip__underscore-value___", "strip__underscore-value", False),
("--strip-dash-value---", "strip-dash-value", False),
("__strip-mixed-value---", "strip-mixed-value", False),
("_ -strip-mixed-value _-", "strip-mixed-value", False),
("spam & ıçüş", "spam-ıçüş", True),
("foo ıç bar", "foo-ıç-bar", True),
(" foo ıç bar", "foo-ıç-bar", True),
("你好", "你好", True),
("İstanbul", "istanbul", True),
)
for value, output, is_unicode in items:
with self.subTest(value=value):
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
# Interning the result may be useful, e.g. when fed to Path.
with self.subTest("intern"):
self.assertEqual(sys.intern(text.slugify("a")), "a")
def test_unescape_string_literal(self):
items = [
('"abc"', "abc"),
("'abc'", "abc"),
('"a "bc""', 'a "bc"'),
("''ab' c'", "'ab' c"),
]
for value, output in items:
with self.subTest(value=value):
self.assertEqual(text.unescape_string_literal(value), output)
self.assertEqual(text.unescape_string_literal(lazystr(value)), output)
def test_unescape_string_literal_invalid_value(self):
items = ["", "abc", "'abc\""]
for item in items:
msg = f"Not a string literal: {item!r}"
with self.assertRaisesMessage(ValueError, msg):
text.unescape_string_literal(item)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
self.assertEqual(text.get_valid_filename(lazystr(filename)), "-_123.txt")
msg = "Could not derive file name from '???'"
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
text.get_valid_filename("???")
# After sanitizing this would yield '..'.
msg = "Could not derive file name from '$.$.$'"
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
text.get_valid_filename("$.$.$")
def test_compress_sequence(self):
data = [{"key": i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode() for s in seq]
actual_length = len(b"".join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b"".join(out))
self.assertLess(compressed_length, actual_length)
def test_format_lazy(self):
self.assertEqual("django/test", format_lazy("{}/{}", "django", lazystr("test")))
self.assertEqual("django/test", format_lazy("{0}/{1}", *("django", "test")))
self.assertEqual(
"django/test", format_lazy("{a}/{b}", **{"a": "django", "b": "test"})
)
self.assertEqual(
"django/test", format_lazy("{a[0]}/{a[1]}", a=("django", "test"))
)
t = {}
s = format_lazy("{0[a]}-{p[a]}", t, p=t)
t["a"] = lazystr("django")
self.assertEqual("django-django", s)
t["a"] = "update"
self.assertEqual("update-update", s)
# The format string can be lazy. (string comes from contrib.admin)
s = format_lazy(
gettext_lazy("Added {name} “{object}”."),
name="article",
object="My first try",
)
with override("fr"):
self.assertEqual("Ajout de article «\xa0My first try\xa0».", s)
| TestUtilsText |
python | huggingface__transformers | src/transformers/models/smollm3/modeling_smollm3.py | {
"start": 15263,
"end": 15814
} | class ____(PreTrainedModel):
config: SmolLM3Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["SmolLM3DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": SmolLM3DecoderLayer,
"attentions": SmolLM3Attention,
}
@auto_docstring
| SmolLM3PreTrainedModel |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 60197,
"end": 66588
} | class ____(Request):
"""
Returns statistics for the selected workers and time range aggregated by date intervals.
:param worker_ids: List of worker ids to collect metrics for. If not provided
or empty then all the company workers metrics are analyzed.
:type worker_ids: Sequence[str]
:param from_date: Starting time (in seconds from epoch) for collecting
statistics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting statistics
:type to_date: float
:param interval: Time interval in seconds for a single statistics point. The
minimal value is 1
:type interval: int
:param items: List of metric keys and requested statistics
:type items: Sequence[StatItem]
:param split_by_variant: If True, then break statistics by hardware sub types
:type split_by_variant: bool
"""
_service = "workers"
_action = "get_stats"
_version = "2.13"
_schema = {
"definitions": {
"aggregation_type": {
"description": "Metric aggregation type",
"enum": ["avg", "min", "max"],
"type": "string",
},
"stat_item": {
"properties": {
"category": {
"oneOf": [
{"$ref": "#/definitions/aggregation_type"},
{"type": "null"},
]
},
"key": {
"description": "Name of a metric",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single statistics point. The minimal value is 1",
"type": "integer",
},
"items": {
"description": "List of metric keys and requested statistics",
"items": {"$ref": "#/definitions/stat_item"},
"type": "array",
},
"split_by_variant": {
"default": False,
"description": "If true then break statistics by hardware sub types",
"type": "boolean",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"worker_ids": {
"description": "List of worker ids to collect metrics for. If not provided or empty then all the company workers metrics are analyzed.",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"required": ["from_date", "to_date", "interval", "items"],
"type": "object",
}
def __init__(
self,
from_date: float,
to_date: float,
interval: int,
items: List[Any],
worker_ids: Optional[List[str]] = None,
split_by_variant: Optional[bool] = False,
**kwargs: Any
) -> None:
super(GetStatsRequest, self).__init__(**kwargs)
self.worker_ids = worker_ids
self.from_date = from_date
self.to_date = to_date
self.interval = interval
self.items = items
self.split_by_variant = split_by_variant
@schema_property("worker_ids")
def worker_ids(self) -> Optional[List[str]]:
return self._property_worker_ids
@worker_ids.setter
def worker_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_worker_ids = None
return
self.assert_isinstance(value, "worker_ids", (list, tuple))
self.assert_isinstance(value, "worker_ids", six.string_types, is_array=True)
self._property_worker_ids = value
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
@schema_property("items")
def items(self) -> List[Any]:
return self._property_items
@items.setter
def items(self, value: List[Any]) -> None:
if value is None:
self._property_items = None
return
self.assert_isinstance(value, "items", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [StatItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "items", StatItem, is_array=True)
self._property_items = value
@schema_property("split_by_variant")
def split_by_variant(self) -> Optional[bool]:
return self._property_split_by_variant
@split_by_variant.setter
def split_by_variant(self, value: Optional[bool]) -> None:
if value is None:
self._property_split_by_variant = None
return
self.assert_isinstance(value, "split_by_variant", (bool,))
self._property_split_by_variant = value
| GetStatsRequest |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 42349,
"end": 43324
} | class ____(
BaseOrchestrationRule[orm_models.Run, Union[core.TaskRunPolicy, core.FlowRunPolicy]]
):
"""
Ensures scheduled time is copied from scheduled states to pending states.
If a new scheduled time has been proposed on the pending state, the scheduled time
on the scheduled state will be ignored.
"""
FROM_STATES = {StateType.SCHEDULED}
TO_STATES = {StateType.PENDING}
async def before_transition(
self,
initial_state: states.State[Any] | None,
proposed_state: states.State[Any] | None,
context: OrchestrationContext[
orm_models.Run, core.TaskRunPolicy | core.FlowRunPolicy
],
) -> None:
if initial_state is None or proposed_state is None:
return
if not proposed_state.state_details.scheduled_time:
proposed_state.state_details.scheduled_time = (
initial_state.state_details.scheduled_time
)
| CopyScheduledTime |
python | marshmallow-code__marshmallow | src/marshmallow/types.py | {
"start": 594,
"end": 897
} | class ____(typing.Protocol):
def __call__(
self,
output: typing.Any,
original_data: typing.Any = ...,
*,
partial: bool | StrSequenceOrSet | None = None,
unknown: UnknownOption | None = None,
many: bool = False,
) -> None: ...
| SchemaValidator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams1.py | {
"start": 1360,
"end": 1397
} | class ____[T]:
pass
| ForwardRefClass |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 22280,
"end": 25806
} | class ____(Mean):
"""Wraps a stateless metric function with the Mean metric.
You could use this class to quickly build a mean metric from a function. The
function needs to have the signature `fn(y_true, y_pred)` and return a
per-sample loss array. `MeanMetricWrapper.result()` will return
the average metric value across all samples seen so far.
For example:
```python
def accuracy(y_true, y_pred):
return tf.cast(tf.math.equal(y_true, y_pred), tf.float32)
accuracy_metric = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)
keras_model.compile(..., metrics=accuracy_metric)
```
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: Keyword arguments to pass on to `fn`.
"""
def __init__(self, fn, name=None, dtype=None, **kwargs):
super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
sample_weight: Optional `sample_weight` acts as a
coefficient for the metric. If a scalar is provided, then the metric is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the metric for each sample of the batch is rescaled
by the corresponding element in the `sample_weight` vector. If the shape
of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
to this shape), then each metric element of `y_pred` is scaled by the
corresponding value of `sample_weight`. (Note on `dN-1`: all metric
functions reduce by 1 dimension, usually the last axis (-1)).
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
[y_true, y_pred], sample_weight = (
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_true, y_pred], sample_weight))
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
return super(MeanMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
if type(self) is MeanMetricWrapper: # pylint: disable=unidiomatic-typecheck
# Only include function argument when the object is a MeanMetricWrapper
# and not a subclass.
config['fn'] = self._fn
for k, v in self._fn_kwargs.items():
config[k] = backend.eval(v) if is_tensor_or_variable(v) else v
base_config = super(MeanMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# Note that while MeanMetricWrapper itself isn't public, objects of this
# class may be created and added to the model by calling model.compile.
fn = config.pop('fn', None)
if cls is MeanMetricWrapper:
return cls(get(fn), **config)
return super(MeanMetricWrapper, cls).from_config(config)
| MeanMetricWrapper |
python | huggingface__transformers | src/transformers/models/roc_bert/modeling_roc_bert.py | {
"start": 58652,
"end": 63084
} | class ____(RoCBertPreTrainedModel):
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->RoCBert,bert->roc_bert
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roc_bert = RoCBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
input_shape_ids: Optional[torch.Tensor] = None,
input_pronunciation_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
input_shape_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the shape vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input_shape_ids)
input_pronunciation_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the pronunciation vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input_pronunciation_ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.roc_bert(
input_ids,
input_shape_ids=input_shape_ids,
input_pronunciation_ids=input_pronunciation_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| RoCBertForSequenceClassification |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/watchdog.py | {
"start": 844,
"end": 2409
} | class ____(object):
"""A class to dump stack traces if no activity happens in ClusterCoordinator."""
def __init__(self, timeout=-1, traceback_file=sys.stdout, on_triggered=None):
if os.environ.get("TF_CLUSTER_COORDINATOR_WATCH_DOG_TIMEOUT",
"").isnumeric():
timeout = int(os.environ["TF_CLUSTER_COORDINATOR_WATCH_DOG_TIMEOUT"])
self._timeout = timeout
self._last_activity_time = time.time()
self._traceback_file = traceback_file
self._on_triggered = on_triggered
self._stopped = False
if timeout > 0:
self._watchdog_thread = threading.Thread(
target=self._watchdog_function, name="WatchDog", daemon=True)
self._watchdog_thread.start()
def stop(self):
self._stopped = True
def _watchdog_function(self):
"""The watchdog thread."""
logging.info("Starting watchdog thread with timeout %r", self._timeout)
while not self._stopped:
time.sleep(self._timeout / 10.0)
current_time = time.time()
if current_time - self._last_activity_time >= self._timeout:
logging.warning(
"No activity for ClusterCoordinator for %r seconds. "
"Dumping stack traces.", self._timeout)
if self._on_triggered:
self._on_triggered()
faulthandler.dump_traceback(file=self._traceback_file)
self._traceback_file.write("==== End of stack traces ====\n")
self._last_activity_time = current_time
def report_closure_done(self):
if self._timeout > 0:
self._last_activity_time = time.time()
| WatchDog |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/grant_types/implicit.py | {
"start": 217,
"end": 16810
} | class ____(GrantTypeBase):
"""`Implicit Grant`_
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and for an access token, the
client receives the access token as the result of the authorization
request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI --->| |
| User- | | Authorization |
| Agent -|----(B)-- User authenticates -->| Server |
| | | |
| |<---(C)--- Redirection URI ----<| |
| | with Access Token +---------------+
| | in Fragment
| | +---------------+
| |----(D)--- Redirection URI ---->| Web-Hosted |
| | without Fragment | Client |
| | | Resource |
| (F) |<---(E)------- Script ---------<| |
| | +---------------+
+-|--------+
| |
(A) (G) Access Token
| |
^ v
+---------+
| |
| Client |
| |
+---------+
Note: The lines illustrating steps (A) and (B) are broken into two
parts as they pass through the user-agent.
Figure 4: Implicit Grant Flow
The flow illustrated in Figure 4 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier. The redirection URI includes
the access token in the URI fragment.
(D) The user-agent follows the redirection instructions by making a
request to the web-hosted client resource (which does not
include the fragment per [RFC2616]). The user-agent retains the
fragment information locally.
(E) The web-hosted client resource returns a web page (typically an
HTML document with an embedded script) capable of accessing the
full redirection URI including the fragment retained by the
user-agent, and extracting the access token (and other
parameters) contained in the fragment.
(F) The user-agent executes the script provided by the web-hosted
client resource locally, which extracts the access token.
(G) The user-agent passes the access token to the client.
See `Section 10.3`_ and `Section 10.16`_ for important security considerations
when using the implicit grant.
.. _`Implicit Grant`: https://tools.ietf.org/html/rfc6749#section-4.2
.. _`Section 10.3`: https://tools.ietf.org/html/rfc6749#section-10.3
.. _`Section 10.16`: https://tools.ietf.org/html/rfc6749#section-10.16
"""
response_types = ['token']
grant_allows_refresh_token = False
def create_authorization_response(self, request, token_handler):
"""Create an authorization response.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "token" for standard OAuth2 implicit flow
or "id_token token" or just "id_token" for OIDC implicit flow
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The authorization server validates the request to ensure that all
required parameters are present and valid. The authorization server
MUST verify that the redirection URI to which it will redirect the
access token matches a redirection URI registered by the client as
described in `Section 3.1.2`_.
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
"""
return self.create_token_response(request, token_handler)
def create_token_response(self, request, token_handler):
"""Return token or error embedded in the URI fragment.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format, per
`Appendix B`_:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
`Section 7.1`_. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by `Section 3.3`_.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
try:
self.validate_token_request(request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# https://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples,
fragment=True)}, None, 302
# In OIDC implicit flow it is possible to have a request_type that does not include the access_token!
# "id_token token" - return the access token and the id token
# "id_token" - don't return the access token
token = token_handler.create_token(request, refresh_token=False) if 'token' in request.response_type.split() else {}
if request.state is not None:
token['state'] = request.state
for modifier in self._token_modifiers:
token = modifier(token, token_handler, request)
# In OIDC implicit flow it is possible to have a request_type that does
# not include the access_token! In this case there is no need to save a token.
if "token" in request.response_type.split():
self.request_validator.save_token(token, request)
return self.prepare_authorization_response(
request, token, {}, None, 302)
def validate_authorization_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
return self.validate_token_request(request)
def validate_token_request(self, request):
"""Check the token request for normal and fatal errors.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
This method is very similar to validate_authorization_request in
the AuthorizationCodeGrant but differ in a few subtle areas.
A normal error could be a missing response_type parameter or the client
attempting to access scope it is not allowed to ask authorization for.
Normal errors can safely be included in the redirection URI and
sent back to the client.
Fatal errors occur when the client_id or redirect_uri is invalid or
missing. These must be caught by the provider and handled, how this
is done is outside of the scope of OAuthLib but showing an error
page describing the issue is a good idea.
"""
# First check for fatal errors
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
# First check duplicate parameters
for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
try:
duplicate_params = request.duplicate_params
except ValueError:
raise errors.InvalidRequestFatalError(description='Unable to parse query string', request=request)
if param in duplicate_params:
raise errors.InvalidRequestFatalError(description='Duplicate %s parameter.' % param, request=request)
# REQUIRED. The client identifier as described in Section 2.2.
# https://tools.ietf.org/html/rfc6749#section-2.2
if not request.client_id:
raise errors.MissingClientIdError(request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(request=request)
# OPTIONAL. As described in Section 3.1.2.
# https://tools.ietf.org/html/rfc6749#section-3.1.2
self._handle_redirects(request)
# Then check for normal errors.
request_info = self._run_custom_validators(request,
self.custom_validators.all_pre)
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B.
# https://tools.ietf.org/html/rfc6749#appendix-B
# Note that the correct parameters to be added are automatically
# populated through the use of specific exceptions
# REQUIRED.
if request.response_type is None:
raise errors.MissingResponseTypeError(request=request)
# Value MUST be one of our registered types: "token" by default or if using OIDC "id_token" or "id_token token"
elif not set(request.response_type.split()).issubset(self.response_types):
raise errors.UnsupportedResponseTypeError(request=request)
log.debug('Validating use of response_type token for client %r (%r).',
request.client_id, request.client)
if not self.request_validator.validate_response_type(request.client_id,
request.response_type,
request.client, request):
log.debug('Client %s is not authorized to use response_type %s.',
request.client_id, request.response_type)
raise errors.UnauthorizedClientError(request=request)
# OPTIONAL. The scope of the access request as described by Section 3.3
# https://tools.ietf.org/html/rfc6749#section-3.3
self.validate_scopes(request)
request_info.update({
'client_id': request.client_id,
'redirect_uri': request.redirect_uri,
'response_type': request.response_type,
'state': request.state,
'request': request,
})
request_info = self._run_custom_validators(
request,
self.custom_validators.all_post,
request_info
)
return request.scopes, request_info
def _run_custom_validators(self,
request,
validations,
request_info=None):
# Make a copy so we don't modify the existing request_info dict
request_info = {} if request_info is None else request_info.copy()
# For implicit grant, auth_validators and token_validators are
# basically equivalent since the token is returned from the
# authorization endpoint.
for validator in validations:
result = validator(request)
if result is not None:
request_info.update(result)
return request_info
| ImplicitGrant |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 6695,
"end": 8497
} | class ____(ast.NodeVisitor):
"""Get nonlocal variables accessed."""
def __init__(self) -> None:
"""Create a NonLocals visitor."""
self.loads: set[str] = set()
self.stores: set[str] = set()
@override
def visit_Name(self, node: ast.Name) -> None:
"""Visit a name node.
Args:
node: The node to visit.
"""
if isinstance(node.ctx, ast.Load):
self.loads.add(node.id)
elif isinstance(node.ctx, ast.Store):
self.stores.add(node.id)
@override
def visit_Attribute(self, node: ast.Attribute) -> None:
"""Visit an attribute node.
Args:
node: The node to visit.
"""
if isinstance(node.ctx, ast.Load):
parent = node.value
attr_expr = node.attr
while isinstance(parent, ast.Attribute):
attr_expr = parent.attr + "." + attr_expr
parent = parent.value
if isinstance(parent, ast.Name):
self.loads.add(parent.id + "." + attr_expr)
self.loads.discard(parent.id)
elif isinstance(parent, ast.Call):
if isinstance(parent.func, ast.Name):
self.loads.add(parent.func.id)
else:
parent = parent.func
attr_expr = ""
while isinstance(parent, ast.Attribute):
if attr_expr:
attr_expr = parent.attr + "." + attr_expr
else:
attr_expr = parent.attr
parent = parent.value
if isinstance(parent, ast.Name):
self.loads.add(parent.id + "." + attr_expr)
| NonLocals |
python | joke2k__faker | faker/providers/date_time/tl_PH/__init__.py | {
"start": 49,
"end": 155
} | class ____(FilPhProvider):
"""No difference from DateTime Provider for fil_PH locale"""
pass
| Provider |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 10196,
"end": 11290
} | class ____:
"""Make sure alignment works regardless of when it was set"""
def test_aligned_ascii(
self, aligned_before_table: PrettyTable, aligned_after_table: PrettyTable
) -> None:
assert aligned_before_table.get_string() == aligned_after_table.get_string()
def test_aligned_html(
self, aligned_before_table: PrettyTable, aligned_after_table: PrettyTable
) -> None:
assert (
aligned_before_table.get_html_string()
== aligned_after_table.get_html_string()
)
def test_aligned_latex(
self, aligned_before_table: PrettyTable, aligned_after_table: PrettyTable
) -> None:
assert (
aligned_before_table.get_latex_string()
== aligned_after_table.get_latex_string()
)
def test_aligned_mediawiki(
self, aligned_before_table: PrettyTable, aligned_after_table: PrettyTable
) -> None:
assert aligned_before_table.get_mediawiki_string(
header=True
) == aligned_after_table.get_mediawiki_string(header=True)
| TestAlignment |
python | django__django | tests/backends/sqlite/tests.py | {
"start": 10221,
"end": 10343
} | class ____(EscapingChecks):
pass
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite tests")
| EscapingChecksDebug |
python | optuna__optuna | optuna/samplers/_tpe/probability_distributions.py | {
"start": 797,
"end": 2485
} | class ____(NamedTuple):
mu: np.ndarray
sigma: np.ndarray
low: float # Currently, low, high and step do not change per trial.
high: float
step: float
_BatchedDistributions = Union[
_BatchedCategoricalDistributions,
_BatchedTruncNormDistributions,
_BatchedTruncLogNormDistributions,
_BatchedDiscreteTruncNormDistributions,
_BatchedDiscreteTruncLogNormDistributions,
]
def _unique_inverse_2d(a: np.ndarray, b: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
This function is a quicker version of:
np.unique(np.concatenate([a[:, None], b[:, None]], axis=-1), return_inverse=True).
"""
assert a.shape == b.shape and len(a.shape) == 1
order = np.argsort(b)
# Stable sorting is required for the tie breaking.
order = order[np.argsort(a[order], kind="stable")]
a_order = a[order]
b_order = b[order]
is_first_occurrence = np.empty_like(a, dtype=bool)
is_first_occurrence[0] = True
is_first_occurrence[1:] = (a_order[1:] != a_order[:-1]) | (b_order[1:] != b_order[:-1])
inv = np.empty(a_order.size, dtype=int)
inv[order] = np.cumsum(is_first_occurrence) - 1
return a_order[is_first_occurrence], b_order[is_first_occurrence], inv
def _log_gauss_mass_unique(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""
This function reduces the log Gaussian probability mass computation by avoiding the
duplicated evaluations using the np.unique_inverse(...) equivalent operation.
"""
a_uniq, b_uniq, inv = _unique_inverse_2d(a.ravel(), b.ravel())
return _truncnorm._log_gauss_mass(a_uniq, b_uniq)[inv].reshape(a.shape)
| _BatchedDiscreteTruncLogNormDistributions |
python | langchain-ai__langchain | libs/partners/ollama/tests/unit_tests/test_auth.py | {
"start": 6852,
"end": 7870
} | class ____:
"""Test URL authentication integration with OllamaEmbeddings."""
@patch("langchain_ollama.embeddings.Client")
@patch("langchain_ollama.embeddings.AsyncClient")
def test_ollama_embeddings_url_auth_integration(
self, mock_async_client: MagicMock, mock_client: MagicMock
) -> None:
"""Test that OllamaEmbeddings properly handles URL authentication."""
url_with_auth = "https://user:password@ollama.example.com:11434"
OllamaEmbeddings(
model=MODEL_NAME,
base_url=url_with_auth,
)
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
mock_client.assert_called_once_with(host=expected_url, headers=expected_headers)
mock_async_client.assert_called_once_with(
host=expected_url, headers=expected_headers
)
| TestOllamaEmbeddingsUrlAuth |
python | huggingface__transformers | src/transformers/models/phimoe/modular_phimoe.py | {
"start": 13172,
"end": 14958
} | class ____(MixtralForCausalLM):
def __init__(self, config):
super().__init__(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=self.config.lm_head_bias)
# Copied from transformers.models.phi3.modeling_phi3.Phi3ForCausalLM.prepare_inputs_for_generation
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
# process
# When the first time input length reached long and short factor switching point, enforce re-compute cache
# It will cause downside of slower at this single token position, however, better than current failure.
if (
past_key_values
and hasattr(self.config, "original_max_position_embeddings")
and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
):
past_length = cache_position[0]
if past_length <= self.config.original_max_position_embeddings:
past_key_values = None
model_inputs = super().prepare_inputs_for_generation(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
use_cache=use_cache,
logits_to_keep=logits_to_keep,
**kwargs,
)
return model_inputs
| PhimoeForCausalLM |
python | django__django | tests/test_utils/test_transactiontestcase.py | {
"start": 1797,
"end": 2361
} | class ____(TransactionTestCase):
available_apps = ["test_utils"]
def test_disallowed_database_queries(self):
message = (
"Database queries to 'other' are not allowed in this test. "
"Add 'other' to test_utils.test_transactiontestcase."
"DisallowedDatabaseQueriesTests.databases to ensure proper test "
"isolation and silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, message):
Car.objects.using("other").get()
| DisallowedDatabaseQueriesTests |
python | ionelmc__pytest-benchmark | src/pytest_benchmark/utils.py | {
"start": 929,
"end": 1748
} | class ____:
def __init__(self, target):
self.target = target
def __str__(self):
name = self.target.__module__ + '.' if hasattr(self.target, '__module__') else ''
name += self.target.__name__ if hasattr(self.target, '__name__') else repr(self.target)
return name
def __repr__(self):
return f'NameWrapper({self.target!r})'
def get_tag(project_name=None):
info = get_commit_info(project_name)
parts = [info['id'], get_current_time()]
if info['dirty']:
parts.append('uncommited-changes')
return '_'.join(parts)
def get_machine_id():
return '{}-{}-{}-{}'.format(
platform.system(),
platform.python_implementation(),
'.'.join(platform.python_version_tuple()[:2]),
platform.architecture()[0],
)
| NameWrapper |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v2.4.0.a.py | {
"start": 1439,
"end": 1672
} | class ____(BaseModel):
__tablename__ = "trials"
trial_id = Column(Integer, primary_key=True)
number = Column(Integer)
study_id = Column(Integer, ForeignKey("studies.study_id"))
value = sa.Column(sa.Float)
| TrialModel |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 1470,
"end": 1522
} | class ____(TypedDict):
users: str
| PermissionsUsers |
python | python__mypy | mypyc/ir/rtypes.py | {
"start": 21574,
"end": 22931
} | class ____(RTypeVisitor[str]):
"""Produce a tuple name based on the concrete representations of types."""
def visit_rinstance(self, t: RInstance) -> str:
return "O"
def visit_runion(self, t: RUnion) -> str:
return "O"
def visit_rprimitive(self, t: RPrimitive) -> str:
if t._ctype == "CPyTagged":
return "I"
elif t._ctype == "char":
return "C"
elif t._ctype == "int64_t":
return "8" # "8 byte integer"
elif t._ctype == "int32_t":
return "4" # "4 byte integer"
elif t._ctype == "int16_t":
return "2" # "2 byte integer"
elif t._ctype == "uint8_t":
return "U1" # "1 byte unsigned integer"
elif t._ctype == "double":
return "F"
assert not t.is_unboxed, f"{t} unexpected unboxed type"
return "O"
def visit_rtuple(self, t: RTuple) -> str:
parts = [elem.accept(self) for elem in t.types]
return "T{}{}".format(len(parts), "".join(parts))
def visit_rstruct(self, t: RStruct) -> str:
assert False, "RStruct not supported in tuple"
def visit_rarray(self, t: RArray) -> str:
assert False, "RArray not supported in tuple"
def visit_rvoid(self, t: RVoid) -> str:
assert False, "rvoid in tuple?"
@final
| TupleNameVisitor |
python | sphinx-doc__sphinx | sphinx/directives/admonitions.py | {
"start": 1690,
"end": 1750
} | class ____(SphinxAdmonition):
node_class = nodes.note
| Note |
python | ray-project__ray | python/ray/tests/test_runtime_env_complicated.py | {
"start": 3722,
"end": 34795
} | class ____:
def get_emoji_version(self):
import emoji # noqa: E811
return emoji.__version__
check_remote_client_conda = """
import ray
context = (ray.client("localhost:24001")
.env({{"conda" : "package-{package_version}"}})
.connect())
@ray.remote
def get_package_version():
import emoji
return emoji.__version__
assert ray.get(get_package_version.remote()) == "{package_version}"
context.disconnect()
"""
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True,
)
def test_client_tasks_and_actors_inherit_from_driver(conda_envs, call_ray_start):
for i, package_version in enumerate(EMOJI_VERSIONS):
runtime_env = {"conda": f"package-{package_version}"}
with ray.client("localhost:24001").env(runtime_env).connect():
assert ray.get(get_emoji_version.remote()) == package_version
actor_handle = VersionActor.remote()
assert ray.get(actor_handle.get_emoji_version.remote()) == package_version
# Ensure that we can have a second client connect using the other
# conda environment.
other_package_version = EMOJI_VERSIONS[(i + 1) % 2]
run_string_as_driver(
check_remote_client_conda.format(package_version=other_package_version)
)
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
def test_task_actor_conda_env(conda_envs, shutdown_only):
ray.init()
# Basic conda runtime env
for package_version in EMOJI_VERSIONS:
runtime_env = {"conda": f"package-{package_version}"}
task = get_emoji_version.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == package_version
actor = VersionActor.options(runtime_env=runtime_env).remote()
assert ray.get(actor.get_emoji_version.remote()) == package_version
# Runtime env should inherit to nested task
@ray.remote
def wrapped_version():
return ray.get(get_emoji_version.remote())
@ray.remote
class Wrapper:
def wrapped_version(self):
return ray.get(get_emoji_version.remote())
for package_version in EMOJI_VERSIONS:
runtime_env = {"conda": f"package-{package_version}"}
task = wrapped_version.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == package_version
actor = Wrapper.options(runtime_env=runtime_env).remote()
assert ray.get(actor.wrapped_version.remote()) == package_version
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
def test_base_full_path(conda_envs, shutdown_only):
"""
Test that `base` and its absolute path prefix can both work.
"""
ray.init()
conda_info = get_conda_info_json()
prefix = conda_info["conda_prefix"]
test_conda_envs = ["base", prefix]
@ray.remote
def get_conda_env_name():
return os.environ.get("CONDA_DEFAULT_ENV")
# Basic conda runtime env
for conda_env in test_conda_envs:
runtime_env = {"conda": conda_env}
task = get_conda_env_name.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == "base"
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
def test_task_actor_conda_env_full_path(conda_envs, shutdown_only):
ray.init()
conda_info = get_conda_info_json()
prefix = conda_info["conda_prefix"]
test_conda_envs = {
package_version: f"{prefix}/envs/package-{package_version}"
for package_version in EMOJI_VERSIONS
}
# Basic conda runtime env
for package_version, conda_full_path in test_conda_envs.items():
runtime_env = {"conda": conda_full_path}
print(f"Testing {package_version}, runtime env: {runtime_env}")
task = get_emoji_version.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == package_version
actor = VersionActor.options(runtime_env=runtime_env).remote()
assert ray.get(actor.get_emoji_version.remote()) == package_version
# Runtime env should inherit to nested task
@ray.remote
def wrapped_version():
return ray.get(get_emoji_version.remote())
@ray.remote
class Wrapper:
def wrapped_version(self):
return ray.get(get_emoji_version.remote())
for package_version, conda_full_path in test_conda_envs.items():
runtime_env = {"conda": conda_full_path}
task = wrapped_version.options(runtime_env=runtime_env)
assert ray.get(task.remote()) == package_version
actor = Wrapper.options(runtime_env=runtime_env).remote()
assert ray.get(actor.wrapped_version.remote()) == package_version
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
def test_task_conda_env_validation_cached(conda_envs, shutdown_only):
"""Verify that when a task is running with the same conda env
it doesn't validate if env exists.
"""
# The first run would be slower because we need to validate
# if the package exists.
ray.init()
version = EMOJI_VERSIONS[0]
runtime_env = {"conda": f"package-{version}"}
task = get_emoji_version.options(runtime_env=runtime_env)
s = time.time()
ray.get(task.remote())
first_run = time.time() - s
# Typically takes 1~2 seconds.
print("First run took", first_run)
# We should verify this doesn't happen
# from the second run.
s = time.time()
for _ in range(10):
ray.get(task.remote())
second_10_runs = time.time() - s
# Typicall takes less than 100ms.
print("second 10 runs took", second_10_runs)
assert second_10_runs < first_run
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
def test_job_config_conda_env(conda_envs, shutdown_only):
for package_version in EMOJI_VERSIONS:
runtime_env = {"conda": f"package-{package_version}"}
ray.init(runtime_env=runtime_env)
assert ray.get(get_emoji_version.remote()) == package_version
ray.shutdown()
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize("runtime_env_class", [dict, RuntimeEnv])
def test_job_eager_install(shutdown_only, runtime_env_class):
# Test enable eager install. This flag is set to True by default.
runtime_env = {"conda": {"dependencies": ["toolz"]}}
env_count = len(get_conda_env_list())
ray.init(runtime_env=runtime_env_class(**runtime_env))
wait_for_condition(lambda: len(get_conda_env_list()) == env_count + 1, timeout=60)
ray.shutdown()
# Test disable eager install
runtime_env = {
"conda": {"dependencies": ["toolz"]},
"config": {"eager_install": False},
}
ray.init(runtime_env=runtime_env_class(**runtime_env))
with pytest.raises(RuntimeError):
wait_for_condition(
lambda: len(get_conda_env_list()) == env_count + 2, timeout=5
)
ray.shutdown()
# Test unavailable type
runtime_env = {
"conda": {"dependencies": ["toolz"]},
"config": {"eager_install": 123},
}
with pytest.raises(TypeError):
ray.init(runtime_env=runtime_env_class(**runtime_env))
ray.shutdown()
def test_get_conda_env_dir(tmp_path):
"""
Typical output of `conda env list`, for context:
base /Users/scaly/anaconda3
my_env_1 /Users/scaly/anaconda3/envs/my_env_1
For this test, `tmp_path` is a stand-in for `Users/scaly/anaconda3`.
"""
# Simulate starting in an env named tf1.
d = tmp_path / "envs" / "tf1"
Path.mkdir(d, parents=True)
with mock.patch.dict(
os.environ, {"CONDA_PREFIX": str(d), "CONDA_DEFAULT_ENV": "tf1"}
):
with pytest.raises(ValueError):
# Env tf2 should not exist.
env_dir = get_conda_env_dir("tf2")
tf2_dir = tmp_path / "envs" / "tf2"
Path.mkdir(tf2_dir, parents=True)
env_dir = get_conda_env_dir("tf2")
assert env_dir == str(tmp_path / "envs" / "tf2")
# Simulate starting in (base) conda env.
with mock.patch.dict(
os.environ, {"CONDA_PREFIX": str(tmp_path), "CONDA_DEFAULT_ENV": "base"}
):
with pytest.raises(ValueError):
# Env tf3 should not exist.
env_dir = get_conda_env_dir("tf3")
# Env tf2 still should exist.
env_dir = get_conda_env_dir("tf2")
assert env_dir == str(tmp_path / "envs" / "tf2")
@pytest.mark.skipif(
os.environ.get("CONDA_DEFAULT_ENV") is None,
reason="must be run from within a conda environment",
)
def test_get_conda_envs(conda_envs):
"""
Tests that we can at least find 3 conda envs: base, and two envs we created.
"""
conda_info = get_conda_info_json()
envs = get_conda_envs(conda_info)
prefix = conda_info["conda_prefix"]
assert ("base", prefix) in envs
assert ("package-2.1.0", prefix + "/envs/package-2.1.0") in envs
assert ("package-2.2.0", prefix + "/envs/package-2.2.0") in envs
@pytest.mark.skipif(
os.environ.get("CONDA_EXE") is None,
reason="Requires properly set-up conda shell",
)
def test_conda_create_task(shutdown_only):
"""Tests dynamic creation of a conda env in a task's runtime env. Assumes
`conda init` has been successfully called."""
ray.init()
runtime_env = {
"conda": {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
}
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(f.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
assert ray.get(f.options(runtime_env=runtime_env).remote())
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.skipif(
os.environ.get("CONDA_EXE") is None,
reason="Requires properly set-up conda shell",
)
def test_conda_create_job_config(shutdown_only):
"""Tests dynamic conda env creation in a runtime env in the JobConfig."""
runtime_env = {
"conda": {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
}
ray.init(runtime_env=runtime_env)
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
def test_inject_dependencies():
num_tests = 4
conda_dicts = [None] * num_tests
outputs = [None] * num_tests
conda_dicts[0] = {}
outputs[0] = {"dependencies": ["python=7.8", "pip", {"pip": ["ray==1.2.3"]}]}
conda_dicts[1] = {"dependencies": ["blah"]}
outputs[1] = {
"dependencies": ["blah", "python=7.8", "pip", {"pip": ["ray==1.2.3"]}]
}
conda_dicts[2] = {"dependencies": ["blah", "pip"]}
outputs[2] = {
"dependencies": ["blah", "pip", "python=7.8", {"pip": ["ray==1.2.3"]}]
}
conda_dicts[3] = {"dependencies": ["blah", "pip", {"pip": ["some_pkg"]}]}
outputs[3] = {
"dependencies": [
"blah",
"pip",
{"pip": ["ray==1.2.3", "some_pkg"]},
"python=7.8",
]
}
for i in range(num_tests):
output = inject_dependencies(conda_dicts[i], "7.8", ["ray==1.2.3"])
error_msg = (
f"failed on input {i}."
f"Output: {output} \n"
f"Expected output: {outputs[i]}"
)
assert output == outputs[i], error_msg
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True,
)
def test_conda_create_ray_client(call_ray_start):
"""Tests dynamic conda env creation in RayClient."""
runtime_env = {
"conda": {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
}
@ray.remote
def f():
import pip_install_test # noqa
return True
with ray.client("localhost:24001").env(runtime_env).connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
with ray.client("localhost:24001").connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed in a client that doesn't
# use the runtime_env
ray.get(f.remote())
@pytest.mark.parametrize("pip_as_str", [True, False])
def test_pip_task(shutdown_only, pip_as_str, tmp_path):
"""Tests pip installs in the runtime env specified in f.options()."""
ray.init()
if pip_as_str:
d = tmp_path / "pip_requirements"
d.mkdir()
p = d / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
p.write_text(requirements_txt)
runtime_env = {"pip": str(p)}
else:
runtime_env = {"pip": ["pip-install-test==0.5"]}
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(f.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
assert ray.get(f.options(runtime_env=runtime_env).remote())
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize("option", ["conda", "pip"])
def test_conda_pip_extras_ray_default(shutdown_only, option):
"""Tests that ray[extras] can be included as a conda/pip dependency."""
ray.init()
pip = ["pip-install-test==0.5", "ray[default]"]
if option == "conda":
runtime_env = {"conda": {"dependencies": ["pip", {"pip": pip}]}}
elif option == "pip":
runtime_env = {"pip": pip}
else:
assert False, f"Unknown option: {option}"
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(f.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
assert ray.get(f.options(runtime_env=runtime_env).remote())
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize("pip_as_str", [True, False])
def test_pip_job_config(shutdown_only, pip_as_str, tmp_path):
"""Tests dynamic installation of pip packages in a task's runtime env."""
if pip_as_str:
d = tmp_path / "pip_requirements"
d.mkdir()
p = d / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
p.write_text(requirements_txt)
runtime_env = {"pip": str(p)}
else:
runtime_env = {"pip": ["pip-install-test==0.5"]}
ray.init(runtime_env=runtime_env)
@ray.remote
def f():
import pip_install_test # noqa
return True
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
@pytest.mark.skipif(_WIN32, reason="Fails on windows")
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True,
)
def test_client_working_dir_filepath(call_ray_start, tmp_path):
"""Test that pip and conda filepaths work with working_dir."""
working_dir = tmp_path / "requirements"
working_dir.mkdir()
pip_file = working_dir / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
pip_file.write_text(requirements_txt)
runtime_env_pip = {"working_dir": str(working_dir), "pip": str(pip_file)}
conda_file = working_dir / "environment.yml"
conda_dict = {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
conda_str = yaml.dump(conda_dict)
conda_file.write_text(conda_str)
runtime_env_conda = {"working_dir": str(working_dir), "conda": str(conda_file)}
@ray.remote
def f():
import pip_install_test # noqa
return True
with ray.client("localhost:24001").connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed in a client that doesn't
# use the runtime_env
ray.get(f.remote())
for runtime_env in [runtime_env_pip, runtime_env_conda]:
with ray.client("localhost:24001").env(runtime_env).connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f.remote())
@pytest.mark.skipif(_WIN32, reason="Hangs on windows")
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 24001 --port 0"],
indirect=True,
)
def test_conda_pip_filepaths_remote(call_ray_start, tmp_path):
"""Test that pip and conda filepaths work, simulating a remote cluster."""
working_dir = tmp_path / "requirements"
working_dir.mkdir()
pip_file = working_dir / "requirements.txt"
requirements_txt = """
pip-install-test==0.5
"""
pip_file.write_text(requirements_txt)
runtime_env_pip = {"pip": str(pip_file)}
conda_file = working_dir / "environment.yml"
conda_dict = {"dependencies": ["pip", {"pip": ["pip-install-test==0.5"]}]}
conda_str = yaml.dump(conda_dict)
conda_file.write_text(conda_str)
runtime_env_conda = {"conda": str(conda_file)}
@ray.remote
def f():
import pip_install_test # noqa
return True
with ray.client("localhost:24001").connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed in a client that doesn't
# use the runtime_env
ray.get(f.remote())
# pip and conda files should be parsed when the function is declared.
f_pip = f.options(runtime_env=runtime_env_pip)
f_conda = f.options(runtime_env=runtime_env_conda)
# Remove the pip and conda files from the local filesystem. This is
# necessary to simulate the files not being present on the remote cluster,
# because in this single-machine test, the cluster has the same filesystem.
os.remove(pip_file)
os.remove(conda_file)
# Test with and without a working_dir.
client_envs = [{}, {"working_dir": str(working_dir)}]
for runtime_env in client_envs:
with ray.client("localhost:24001").env(runtime_env).connect():
with pytest.raises(ModuleNotFoundError):
# Ensure pip-install-test is not installed on the test machine
import pip_install_test # noqa
assert ray.get(f_pip.remote()), str(runtime_env)
assert ray.get(f_conda.remote()), str(runtime_env)
install_env_script = """
import ray
import time
ray.init(address="auto", runtime_env={env})
@ray.remote
def f():
return "hello"
f.remote()
# Give the env 5 seconds to begin installing in a new worker.
time.sleep(5)
"""
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
def test_env_installation_nonblocking(shutdown_only):
"""Test fix for https://github.com/ray-project/ray/issues/16226."""
env1 = {"pip": ["pip-install-test==0.5"]}
ray.init(runtime_env=env1)
@ray.remote
def f():
return "hello"
# Warm up a worker because it takes time to start.
ray.get(f.remote())
def assert_tasks_finish_quickly(total_sleep_s=0.1):
"""Call f every 0.01 seconds for total time total_sleep_s."""
gap_s = 0.01
for i in range(int(total_sleep_s / gap_s)):
start = time.time()
ray.get(f.remote())
# Env installation takes around 10 to 60 seconds. If we fail the
# below assert, we can be pretty sure an env installation blocked
# the task.
assert time.time() - start < 2.0
time.sleep(gap_s)
assert_tasks_finish_quickly()
env2 = {"pip": ["pip-install-test==0.5", "requests"]}
f.options(runtime_env=env2).remote()
# Check that installing env2 above does not block tasks using env1.
assert_tasks_finish_quickly()
proc = run_string_as_driver_nonblocking(install_env_script.format(env=env1))
# Check that installing env1 in a new worker in the script above does not
# block other tasks that use env1.
assert_tasks_finish_quickly(total_sleep_s=5)
proc.kill()
proc.wait()
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
def test_simultaneous_install(shutdown_only):
"""Test that two envs can be installed without affecting each other."""
ray.init()
@ray.remote
class VersionWorker:
def __init__(self, key):
self.key = key
def get(self):
import emoji
return (self.key, emoji.__version__)
# Before we used a global lock on conda installs, these two envs would be
# installed concurrently, leading to errors:
# https://github.com/ray-project/ray/issues/17086
# Now we use a global lock, so the envs are installed sequentially.
worker_1 = VersionWorker.options(
runtime_env={"pip": {"packages": ["emoji==2.1.0"], "pip_check": False}}
).remote(key=1)
worker_2 = VersionWorker.options(
runtime_env={"pip": {"packages": ["emoji==2.2.0"], "pip_check": False}}
).remote(key=2)
assert ray.get(worker_1.get.remote()) == (1, "2.1.0")
assert ray.get(worker_2.get.remote()) == (2, "2.2.0")
CLIENT_SERVER_PORT = 24001
@pytest.mark.skipif(_WIN32, reason="Fails on windows")
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
# Skip on Linux ARM64 as the test times out. This is probably because some
# dependencies may not be available for arm64 and must be compiled from source.
@pytest.mark.skipif(
sys.platform == "linux" and platform.processor() == "aarch64",
reason="This test is currently not supported on Linux ARM64",
)
@pytest.mark.parametrize(
"call_ray_start",
[f"ray start --head --ray-client-server-port {CLIENT_SERVER_PORT} --port 0"],
indirect=True,
)
def test_e2e_complex(call_ray_start, tmp_path):
"""Test multiple runtime_env options across multiple client connections.
1. Run a Ray Client job with both working_dir and pip specified. Check the
environment using imports and file reads in tasks and actors.
2. On the same cluster, run another job with a requirements.txt file and
overriding per-actor and per-task pip requirements.
"""
# Create a file to use to test working_dir
specific_path = tmp_path / "test"
specific_path.write_text("Hello")
with ray.client(f"localhost:{CLIENT_SERVER_PORT}").env(
{"working_dir": str(tmp_path), "pip": ["pip-install-test"]}
).connect():
# Test that a task is started in the working_dir.
@ray.remote
def test_read():
return Path("./test").read_text()
assert ray.get(test_read.remote()) == "Hello"
# Check a task has the job's pip requirements and working_dir.
@ray.remote
def test_pip():
import pip_install_test # noqa
import ray # noqa
return Path("./test").read_text()
assert ray.get(test_pip.remote()) == "Hello"
# Check an actor has the job's pip requirements and working_dir.
@ray.remote
class TestActor:
def test(self):
import pip_install_test # noqa
return Path("./test").read_text()
a = TestActor.remote()
assert ray.get(a.test.remote()) == "Hello"
pandas_version = "1.5.3"
if sys.version_info.major >= 3 and sys.version_info.minor >= 11:
pandas_version = "2.2.3"
requirement_path = tmp_path / "requirements.txt"
requirement_path.write_text(
"\n".join(
[
"PyGithub",
f"pandas=={pandas_version}",
"typer",
"aiofiles",
]
)
)
# Start a new job on the same cluster using the requirements file.
with ray.client(f"localhost:{CLIENT_SERVER_PORT}").env(
{"working_dir": str(tmp_path), "pip": str(requirement_path)}
).connect():
@ray.remote
def test_read():
return Path("./test").read_text()
assert ray.get(test_read.remote()) == "Hello"
# Check that a task has the job's pip requirements and working_dir.
@ray.remote
def test_import():
import ray # noqa
import typer # noqa
return Path("./test").read_text()
assert ray.get(test_import.remote()) == "Hello"
# Check that an actor has the job's pip requirements and working_dir.
@ray.remote
class TestActor:
def test(self):
import ray # noqa
import typer # noqa
return Path("./test").read_text()
a = TestActor.options(runtime_env={"pip": str(requirement_path)}).remote()
assert ray.get(a.test.remote()) == "Hello"
# Check that per-task pip specification works and that the job's
# working_dir is still inherited.
@ray.remote
def test_pip():
import pip_install_test # noqa
return Path("./test").read_text()
assert (
ray.get(
test_pip.options(runtime_env={"pip": ["pip-install-test"]}).remote()
)
== "Hello"
)
# Check that pip_install_test is not in the job's pip requirements.
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(test_pip.remote())
assert "ModuleNotFoundError" in str(excinfo.value)
# Check that per-actor pip specification works and that the job's
# working_dir is still inherited.
@ray.remote
class TestActor:
def test(self):
import pip_install_test # noqa
return Path("./test").read_text()
a = TestActor.options(runtime_env={"pip": ["pip-install-test"]}).remote()
assert ray.get(a.test.remote()) == "Hello"
@pytest.mark.skipif(_WIN32, reason="Fails on windows")
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
def test_runtime_env_override(call_ray_start):
# https://github.com/ray-project/ray/issues/16481
with tempfile.TemporaryDirectory() as tmpdir, chdir(tmpdir):
ray.init(address="auto", namespace="test")
@ray.remote
class Child:
def getcwd(self):
import os
return os.getcwd()
def read(self, path):
return open(path).read()
def ready(self):
pass
@ray.remote
class Parent:
def spawn_child(self, name, runtime_env):
child = Child.options(
lifetime="detached", name=name, runtime_env=runtime_env
).remote()
ray.get(child.ready.remote())
Parent.options(lifetime="detached", name="parent").remote()
ray.shutdown()
with open("hello", "w") as f:
f.write("world")
job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."})
ray.init(address="auto", namespace="test", job_config=job_config)
os.remove("hello")
parent = ray.get_actor("parent")
env = ray.get_runtime_context().runtime_env
print("Spawning with env:", env)
ray.get(parent.spawn_child.remote("child", env))
child = ray.get_actor("child")
child_cwd = ray.get(child.getcwd.remote())
# Child should be in tmp runtime resource dir.
assert child_cwd != os.getcwd(), (child_cwd, os.getcwd())
assert ray.get(child.read.remote("hello")) == "world"
ray.shutdown()
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="This test is only run on linux CI machines.",
)
def test_pip_with_env_vars(start_cluster, tmp_path):
"""
The file structure:
$tmp_path/
│
├── setup.py
├── dist/ # the tar.gz file will be generated here
└── test_package/
└── test.py
"""
with chdir(tmp_path):
TEST_ENV_NAME = "TEST_ENV_VARS"
TEST_ENV_VALUE = "TEST"
package_name = "test_package"
package_version = "0.0.1"
package_dir = tmp_path
try_to_create_directory(os.path.join(package_dir, package_name))
setup_filename = os.path.join(package_dir, "setup.py")
setup_code = """import os
from setuptools import setup, find_packages
from setuptools.command.install import install
| VersionActor |
python | pypa__setuptools | setuptools/_vendor/autocommand/automain.py | {
"start": 777,
"end": 2076
} | class ____(AutocommandError, TypeError):
pass
def automain(module, *, args=(), kwargs=None):
'''
This decorator automatically invokes a function if the module is being run
as the "__main__" module. Optionally, provide args or kwargs with which to
call the function. If `module` is "__main__", the function is called, and
the program is `sys.exit`ed with the return value. You can also pass `True`
to cause the function to be called unconditionally. If the function is not
called, it is returned unchanged by the decorator.
Usage:
@automain(__name__) # Pass __name__ to check __name__=="__main__"
def main():
...
If __name__ is "__main__" here, the main function is called, and then
sys.exit called with the return value.
'''
# Check that @automain(...) was called, rather than @automain
if callable(module):
raise AutomainRequiresModuleError(module)
if module == '__main__' or module is True:
if kwargs is None:
kwargs = {}
# Use a function definition instead of a lambda for a neater traceback
def automain_decorator(main):
sys.exit(main(*args, **kwargs))
return automain_decorator
else:
return lambda main: main
| AutomainRequiresModuleError |
python | ipython__ipython | IPython/core/history.py | {
"start": 19436,
"end": 19613
} | class ____:
output_type: typing.Literal[
"out_stream", "err_stream", "display_data", "execute_result"
]
bundle: typing.Dict[str, str | list[str]]
| HistoryOutput |
python | doocs__leetcode | solution/0300-0399/0342.Power of Four/Solution.py | {
"start": 0,
"end": 131
} | class ____:
def isPowerOfFour(self, n: int) -> bool:
return n > 0 and (n & (n - 1)) == 0 and (n & 0xAAAAAAAA) == 0
| Solution |
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 19508,
"end": 20125
} | class ____(TestCase):
def test_init(self):
try:
foo = FacetIntegerField(model_attr="foo")
foo_exact = FacetIntegerField(facet_for="bar")
except:
self.fail()
self.assertEqual(foo.facet_for, None)
self.assertEqual(foo_exact.null, True)
self.assertEqual(foo_exact.facet_for, "bar")
def test_prepare(self):
mock = MockModel()
mock.user = "daniel"
mock.view_count = 13
view_count = FacetIntegerField(model_attr="view_count")
self.assertEqual(view_count.prepare(mock), 13)
| FacetIntegerFieldTestCase |
python | facelessuser__pymdown-extensions | pymdownx/magiclink.py | {
"start": 36628,
"end": 37728
} | class ____(_MagiclinkReferencePattern):
"""Convert #1, !1, and commit_hash."""
ANCESTOR_EXCLUDES = ('a',)
def handleMatch(self, m, data):
"""Handle email link patterns."""
# We don't have a valid provider, user, and repo, reject
if not self.user or not self.repo:
return None, None, None
is_commit = m.group('commit')
is_diff = m.group('diff')
value = m.group('commit') if is_commit else m.group('issue')
value2 = m.group('diff') if is_diff else None
repo = self.repo
user = self.user
provider = self.provider
self.my_repo = True
self.my_user = True
el = etree.Element("a")
if is_diff:
self.process_compare(el, provider, user, repo, value, value2)
elif is_commit:
self.process_commit(el, provider, user, repo, value)
else:
if not self.process_issues(el, provider, user, repo, value):
return m.group(0), m.start(0), m.end(0)
return el, m.start(0), m.end(0)
| MagiclinkInternalRefsPattern |
python | yaml__pyyaml | lib/yaml/constructor.py | {
"start": 314,
"end": 6501
} | class ____:
yaml_constructors = {}
yaml_multi_constructors = {}
def __init__(self):
self.constructed_objects = {}
self.recursive_objects = {}
self.state_generators = []
self.deep_construct = False
def check_data(self):
# If there are more documents available?
return self.check_node()
def check_state_key(self, key):
"""Block special attributes/methods from being set in a newly created
object, to prevent user-controlled methods from being called during
deserialization"""
if self.get_state_keys_blacklist_regexp().match(key):
raise ConstructorError(None, None,
"blacklisted key '%s' in instance state found" % (key,), None)
def get_data(self):
# Construct and return the next document.
if self.check_node():
return self.construct_document(self.get_node())
def get_single_data(self):
# Ensure that the stream contains a single document and construct it.
node = self.get_single_node()
if node is not None:
return self.construct_document(node)
return None
def construct_document(self, node):
data = self.construct_object(node)
while self.state_generators:
state_generators = self.state_generators
self.state_generators = []
for generator in state_generators:
for dummy in generator:
pass
self.constructed_objects = {}
self.recursive_objects = {}
self.deep_construct = False
return data
def construct_object(self, node, deep=False):
if node in self.constructed_objects:
return self.constructed_objects[node]
if deep:
old_deep = self.deep_construct
self.deep_construct = True
if node in self.recursive_objects:
raise ConstructorError(None, None,
"found unconstructable recursive node", node.start_mark)
self.recursive_objects[node] = None
constructor = None
tag_suffix = None
if node.tag in self.yaml_constructors:
constructor = self.yaml_constructors[node.tag]
else:
for tag_prefix in self.yaml_multi_constructors:
if tag_prefix is not None and node.tag.startswith(tag_prefix):
tag_suffix = node.tag[len(tag_prefix):]
constructor = self.yaml_multi_constructors[tag_prefix]
break
else:
if None in self.yaml_multi_constructors:
tag_suffix = node.tag
constructor = self.yaml_multi_constructors[None]
elif None in self.yaml_constructors:
constructor = self.yaml_constructors[None]
elif isinstance(node, ScalarNode):
constructor = self.__class__.construct_scalar
elif isinstance(node, SequenceNode):
constructor = self.__class__.construct_sequence
elif isinstance(node, MappingNode):
constructor = self.__class__.construct_mapping
if tag_suffix is None:
data = constructor(self, node)
else:
data = constructor(self, tag_suffix, node)
if isinstance(data, types.GeneratorType):
generator = data
data = next(generator)
if self.deep_construct:
for dummy in generator:
pass
else:
self.state_generators.append(generator)
self.constructed_objects[node] = data
del self.recursive_objects[node]
if deep:
self.deep_construct = old_deep
return data
def construct_scalar(self, node):
if not isinstance(node, ScalarNode):
raise ConstructorError(None, None,
"expected a scalar node, but found %s" % node.id,
node.start_mark)
return node.value
def construct_sequence(self, node, deep=False):
if not isinstance(node, SequenceNode):
raise ConstructorError(None, None,
"expected a sequence node, but found %s" % node.id,
node.start_mark)
return [self.construct_object(child, deep=deep)
for child in node.value]
def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if not isinstance(key, collections.abc.Hashable):
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_pairs(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
pairs = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
value = self.construct_object(value_node, deep=deep)
pairs.append((key, value))
return pairs
@classmethod
def add_constructor(cls, tag, constructor):
if not 'yaml_constructors' in cls.__dict__:
cls.yaml_constructors = cls.yaml_constructors.copy()
cls.yaml_constructors[tag] = constructor
@classmethod
def add_multi_constructor(cls, tag_prefix, multi_constructor):
if not 'yaml_multi_constructors' in cls.__dict__:
cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
cls.yaml_multi_constructors[tag_prefix] = multi_constructor
| BaseConstructor |
python | walkccc__LeetCode | solutions/748. Shortest Completing Word/748.py | {
"start": 0,
"end": 527
} | class ____:
def shortestCompletingWord(self, licensePlate: str, words: list[str]) -> str:
def isMatch(word: str) -> bool:
wordCount = collections.Counter(word)
return False if any(
wordCount[i] < count[i] for i in string.ascii_letters) else True
ans = '*' * 16
count = collections.defaultdict(int)
for c in licensePlate:
if c.isalpha():
count[c.lower()] += 1
for word in words:
if len(word) < len(ans) and isMatch(word):
ans = word
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/pegasus_x/modeling_pegasus_x.py | {
"start": 27933,
"end": 32986
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: PegasusXConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusXAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=False,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusXAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=False,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache: Whether to us KV cache for decoding
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| PegasusXDecoderLayer |
python | vyperlang__vyper | vyper/venom/analysis/cfg.py | {
"start": 212,
"end": 4501
} | class ____(IRAnalysis):
"""
Compute control flow graph information for each basic block in the function.
"""
_dfs: OrderedSet[IRBasicBlock]
_cfg_in: MutableMapping[IRBasicBlock, OrderedSet[IRBasicBlock]]
_cfg_out: MutableMapping[IRBasicBlock, OrderedSet[IRBasicBlock]]
_reachable: MutableMapping[IRBasicBlock, bool]
def analyze(self) -> None:
fn = self.function
self._dfs = OrderedSet()
# use weak key dictionary since if a bb gets removed (for being
# unreachable), it should fall out of the cfg analysis.
self._cfg_in = WeakKeyDictionary()
self._cfg_out = WeakKeyDictionary()
self._reachable = WeakKeyDictionary()
for bb in fn.get_basic_blocks():
self._cfg_in[bb] = OrderedSet()
self._cfg_out[bb] = OrderedSet()
self._reachable[bb] = False
for bb in fn.get_basic_blocks():
# order of cfg_out matters to performance!
for next_bb in reversed(bb.out_bbs):
self._cfg_out[bb].add(next_bb)
self._cfg_in[next_bb].add(bb)
self._compute_dfs_post_r(self.function.entry)
def add_cfg_in(self, bb: IRBasicBlock, pred: IRBasicBlock):
self._cfg_in[bb].add(pred)
def add_cfg_out(self, bb, succ):
self._cfg_out[bb].add(succ)
def remove_cfg_in(self, bb: IRBasicBlock, pred: IRBasicBlock):
self._cfg_in[bb].remove(pred)
def remove_cfg_out(self, bb: IRBasicBlock, succ: IRBasicBlock):
self._cfg_out[bb].remove(succ)
def cfg_in(self, bb: IRBasicBlock) -> OrderedSet[IRBasicBlock]:
return self._cfg_in[bb]
def cfg_out(self, bb: IRBasicBlock) -> OrderedSet[IRBasicBlock]:
return self._cfg_out[bb]
def is_reachable(self, bb: IRBasicBlock) -> bool:
return self._reachable[bb]
def is_normalized(self) -> bool:
"""
Check if function is normalized. A function is normalized if in the
CFG, no basic block simultaneously has multiple inputs and outputs.
That is, a basic block can be jumped to *from* multiple blocks, or it
can jump *to* multiple blocks, but it cannot simultaneously do both.
Having a normalized CFG makes calculation of stack layout easier when
emitting assembly.
"""
for bb in self.function.get_basic_blocks():
# Ignore if there are no multiple predecessors
if len(self._cfg_in[bb]) <= 1:
continue
# Check if there is a branching jump at the end
# of one of the predecessors
for in_bb in self._cfg_in[bb]:
if len(self._cfg_out[in_bb]) > 1:
return False
# The function is normalized
return True
def _compute_dfs_post_r(self, bb):
if self._reachable[bb]:
return
self._reachable[bb] = True
for out_bb in self._cfg_out[bb]:
self._compute_dfs_post_r(out_bb)
self._dfs.add(bb)
@property
def dfs_pre_walk(self) -> Iterator[IRBasicBlock]:
visited: OrderedSet[IRBasicBlock] = OrderedSet()
def _visit_dfs_pre_r(bb: IRBasicBlock):
if bb in visited:
return
visited.add(bb)
yield bb
for out_bb in self._cfg_out[bb]:
yield from _visit_dfs_pre_r(out_bb)
yield from _visit_dfs_pre_r(self.function.entry)
@property
def dfs_post_walk(self) -> Iterator[IRBasicBlock]:
return iter(self._dfs)
def invalidate(self):
from vyper.venom.analysis import (
DFGAnalysis,
DominatorTreeAnalysis,
LivenessAnalysis,
ReachableAnalysis,
)
# just in case somebody is holding onto a bad reference to this
del self._cfg_in
del self._cfg_out
del self._reachable
del self._dfs
# just to be on the safe side, but this is probably not needed.
self.analyses_cache.invalidate_analysis(DFGAnalysis)
self.analyses_cache.invalidate_analysis(DominatorTreeAnalysis)
self.analyses_cache.invalidate_analysis(LivenessAnalysis)
self.analyses_cache.invalidate_analysis(ReachableAnalysis)
| CFGAnalysis |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 46036,
"end": 50478
} | class ____(Colormap):
"""
Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
Parameters
----------
colors : list of :mpltype:`color` or array
Sequence of Matplotlib color specifications (color names or RGB(A)
values).
name : str, optional
String to identify the colormap.
N : int, optional
Number of entries in the map. The default is *None*, in which case
there is one colormap entry for each element in the list of colors.
If ::
N < len(colors)
the list will be truncated at *N*. If ::
N > len(colors)
the list will be extended by repetition.
.. deprecated:: 3.11
This parameter will be removed. Please instead ensure that
the list of passed colors is the required length.
bad : :mpltype:`color`, default: transparent
The color for invalid values (NaN or masked).
.. versionadded:: 3.11
under : :mpltype:`color`, default: color of the lowest value
The color for low out-of-range values.
.. versionadded:: 3.11
over : :mpltype:`color`, default: color of the highest value
The color for high out-of-range values.
.. versionadded:: 3.11
"""
@_api.delete_parameter(
"3.11", "N",
message="Passing 'N' to ListedColormap is deprecated since %(since)s "
"and will be removed in %(removal)s. Please ensure the list "
"of passed colors is the required length instead."
)
def __init__(self, colors, name='unnamed', N=None, *,
bad=None, under=None, over=None):
if N is None:
self.colors = colors
N = len(colors)
else:
if isinstance(colors, str):
self.colors = [colors] * N
elif np.iterable(colors):
self.colors = list(
itertools.islice(itertools.cycle(colors), N))
else:
try:
gray = float(colors)
except TypeError:
pass
else:
self.colors = [gray] * N
super().__init__(name, N, bad=bad, under=under, over=over)
def _init(self):
self._lut = np.zeros((self.N + 3, 4), float)
self._lut[:-3] = to_rgba_array(self.colors)
self._isinit = True
self._update_lut_extremes()
@property
def monochrome(self):
"""Return whether all colors in the colormap are identical."""
# Replacement for the attribute *monochrome*. This ensures a consistent
# response independent of the way the ListedColormap was created, which
# was not the case for the manually set attribute.
#
# TODO: It's a separate discussion whether we need this property on
# colormaps at all (at least as public API). It's a very special edge
# case and we only use it for contours internally.
self._ensure_inited()
return self.N <= 1 or np.all(self._lut[0] == self._lut[1:self.N])
def resampled(self, lutsize):
"""Return a new colormap with *lutsize* entries."""
colors = self(np.linspace(0, 1, lutsize))
new_cmap = ListedColormap(colors, name=self.name)
# Keep the over/under values too
new_cmap._rgba_over = self._rgba_over
new_cmap._rgba_under = self._rgba_under
new_cmap._rgba_bad = self._rgba_bad
return new_cmap
def reversed(self, name=None):
"""
Return a reversed instance of the Colormap.
Parameters
----------
name : str, optional
The name for the reversed colormap. If None, the
name is set to ``self.name + "_r"``.
Returns
-------
ListedColormap
A reversed instance of the colormap.
"""
if name is None:
name = self.name + "_r"
colors_r = list(reversed(self.colors))
new_cmap = ListedColormap(colors_r, name=name)
# Reverse the over/under values too
new_cmap._rgba_over = self._rgba_under
new_cmap._rgba_under = self._rgba_over
new_cmap._rgba_bad = self._rgba_bad
return new_cmap
| ListedColormap |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_stateful.py | {
"start": 9226,
"end": 10256
} | class ____(RuleBasedStateMachine):
values = Bundle("values")
def __init__(self):
super().__init__()
self.called = False
@rule(target=values, value=st.just([]) | st.lists(values))
def f(self, value):
assert not self.called
# ensure we get two calls to f before failing. In the minimal failing
# example, both will be from st.just([]).
self.called = True
return value
def test_replaces_when_same_id():
assert_runs_to_output(
SourceSameAsTarget,
f"""
state = {SourceSameAsTarget.__name__}()
values_0 = state.f(value=[])
state.f(value=[values_0])
state.teardown()
""",
)
def test_doesnt_replace_when_different_id():
assert_runs_to_output(
SourceSameAsTargetUnclearOrigin,
f"""
state = {SourceSameAsTargetUnclearOrigin.__name__}()
values_0 = state.f(value=[])
state.f(value=[])
state.teardown()
""",
)
| SourceSameAsTargetUnclearOrigin |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 75917,
"end": 81527
} | class ____(Request):
"""
Creates a new dataset with an initial (empty) version
:param name: Dataset name. Unique within the company.
:type name: str
:param comment: Dataset comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param terms_of_use: Terms of use string
:type terms_of_use: str
:param metadata: User-specified metadata object. Keys must not include '$' and
'.'.
:type metadata: dict
:param public: Create a public dataset Limited to 'root' users.
:type public: bool
"""
_service = "datasets"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"comment": {"description": "Dataset comment", "type": "string"},
"metadata": {
"additionalProperties": True,
"description": "User-specified metadata object. Keys must not include '$' and '.'.",
"type": "object",
},
"name": {
"description": "Dataset name. Unique within the company.",
"type": "string",
},
"public": {
"description": "Create a public dataset Limited to 'root' users.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"terms_of_use": {"description": "Terms of use string", "type": "string"},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
},
"required": ["name"],
"type": "object",
}
def __init__(
self,
name,
comment=None,
tags=None,
system_tags=None,
terms_of_use=None,
metadata=None,
public=None,
project=None,
**kwargs
):
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.terms_of_use = terms_of_use
self.metadata = metadata
self.public = public
self.project = project
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self):
return self._property_comment
@comment.setter
def comment(self, value):
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("terms_of_use")
def terms_of_use(self):
return self._property_terms_of_use
@terms_of_use.setter
def terms_of_use(self, value):
if value is None:
self._property_terms_of_use = None
return
self.assert_isinstance(value, "terms_of_use", six.string_types)
self._property_terms_of_use = value
@schema_property("metadata")
def metadata(self):
return self._property_metadata
@metadata.setter
def metadata(self, value):
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (dict,))
self._property_metadata = value
@schema_property("public")
def public(self):
return self._property_public
@public.setter
def public(self, value):
if value is None:
self._property_public = None
return
self.assert_isinstance(value, "public", (bool,))
self._property_public = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
| CreateRequest |
python | great-expectations__great_expectations | tests/expectations/test_expectation.py | {
"start": 1504,
"end": 1607
} | class ____(MulticolumnMapExpectation):
map_metric = "fake_multicol_metric"
| FakeMulticolumnExpectation |
python | eth-brownie__brownie | brownie/project/main.py | {
"start": 24750,
"end": 43486
} | class ____(_ProjectBase):
"""Simplified Project class used to hold temporary contracts that are
compiled via project.compile_source"""
def __init__(self, name: str, contract_sources: Dict, compiler_config: CompilerConfig) -> None:
self._path = None
self._build_path = None
self._name: Final = name
self._sources = Sources(contract_sources, {})
self._build = Build(self._sources)
self._compile(contract_sources, compiler_config, True)
self._create_containers()
def __repr__(self) -> str:
return f"<TempProject '{self._name}'>"
def check_for_project(path: pathlib.Path | str = ".") -> Optional[pathlib.Path]:
"""Checks for a Brownie project."""
path = Path(path).resolve()
for folder in [path] + list(path.parents):
structure_config = _load_project_structure_config(folder)
contracts = folder.joinpath(structure_config["contracts"])
interfaces = folder.joinpath(structure_config["interfaces"])
scripts = folder.joinpath(structure_config["scripts"])
tests = folder.joinpath(structure_config["tests"])
if next((i for i in contracts.glob("**/*") if i.suffix in (".vy", ".sol")), None):
return folder
if next((i for i in interfaces.glob("**/*") if i.suffix in (".json", ".vy", ".sol")), None):
return folder
if next((i for i in scripts.glob("**/*") if i.suffix in (".py")), None):
return folder
if contracts.is_dir() and tests.is_dir():
return folder
return None
def get_loaded_projects() -> List["Project"]:
"""Returns a list of currently loaded Project objects."""
return _loaded_projects.copy()
def new(
project_path_str: str = ".", ignore_subfolder: bool = False, ignore_existing: bool = False
) -> str:
"""Initializes a new project.
Args:
project_path: Path to initialize the project at. If not exists, it will be created.
ignore_subfolder: (deprecated)
ignore_existing: If True, will not raise when initiating in a non-empty directory.
Returns the path to the project as a string.
"""
project_path = Path(project_path_str).resolve()
if not ignore_existing and project_path.exists() and list(project_path.glob("*")):
raise FileExistsError(f"Directory is not empty: {project_path}")
project_path.mkdir(exist_ok=True)
_create_folders(project_path)
_create_gitfiles(project_path)
_add_to_sys_path(project_path)
return str(project_path)
def from_brownie_mix(
project_name: str,
project_path: Optional[pathlib.Path | str] = None,
ignore_subfolder: bool = False,
) -> str:
"""Initializes a new project via a template. Templates are downloaded from
https://www.github.com/brownie-mix
Args:
project_path: Path to initialize the project at.
ignore_subfolders: (deprecated)
Returns the path to the project as a string.
"""
project_name = str(project_name).lower().replace("-mix", "")
headers = REQUEST_HEADERS.copy()
headers.update(_maybe_retrieve_github_auth())
default_branch = _get_mix_default_branch(project_name, headers)
url = MIXES_URL.format(project_name, default_branch)
if project_path is None:
project_path = Path(".").joinpath(project_name)
project_path = Path(project_path).resolve()
if project_path.exists() and list(project_path.glob("*")):
raise FileExistsError(f"Folder already exists - {project_path}")
print(f"Downloading from {url}...")
_stream_download(url, str(project_path.parent), headers)
project_path.parent.joinpath(f"{project_name}-mix-{default_branch}").rename(project_path)
_create_folders(project_path)
_create_gitfiles(project_path)
_add_to_sys_path(project_path)
return str(project_path)
def compile_source(
source: str,
solc_version: Optional[str] = None,
vyper_version: Optional[str] = None,
optimize: bool = True,
runs: Optional[int] = 200,
evm_version: Optional[EvmVersion] = None,
) -> "TempProject":
"""
Compile the given source code string and return a TempProject container with
the ContractContainer instances.
"""
compiler_config: CompilerConfig = {"evm_version": evm_version, "solc": {}, "vyper": {}}
# if no compiler version was given, first try to find a Solidity pragma
if solc_version is None and vyper_version is None:
try:
solc_version = compiler.solidity.find_best_solc_version(
{"<stdin>": source}, install_needed=True, silent=False
)
except (PragmaError, SolcNotInstalled):
pass
if vyper_version is None:
# if no vyper compiler version is given, try to compile using solidity
compiler_config["solc"] = {
"version": solc_version or str(compiler.solidity.get_version().truncate()),
"optimize": bool(optimize),
"runs": runs or 0,
}
try:
return TempProject("TempSolcProject", {"<stdin>.sol": source}, compiler_config)
except Exception as exc:
# if compilation fails, raise when a solc version was given or we found a pragma
if solc_version is not None:
raise exc
if vyper_version is None:
# if no vyper compiler version was given, try to find a pragma
try:
vyper_version = compiler.vyper.find_best_vyper_version(
{"<stdin>": source}, install_needed=True, silent=False
)
except (PragmaError, VyperNotInstalled):
pass
compiler_config["vyper"] = {"version": vyper_version or compiler.vyper.get_version()}
try:
return TempProject("TempVyperProject", {"<stdin>.vy": source}, compiler_config)
except Exception as exc:
if solc_version is None and vyper_version is None:
raise PragmaError(
"No compiler version specified, no pragma statement in the source, "
"and compilation failed with both solc and vyper"
) from None
raise exc
def load(
project_path: Optional[pathlib.Path | str] = None,
name: Optional[str] = None,
raise_if_loaded: bool = True,
compile: bool = True,
) -> "Project":
"""Loads a project and instantiates various related objects.
Args:
project_path: Path of the project to load. If None, will attempt to
locate a project using check_for_project()
name: Name to assign to the project. If None, the name is generated
from the name of the project folder
Returns a Project object.
"""
# checks
if project_path is None:
project_path = check_for_project(".")
if project_path is not None and project_path != Path(".").absolute():
warnings.warn(
f"Loaded project has a root folder of '{project_path}' "
"which is different from the current working directory",
BrownieEnvironmentWarning,
)
else:
project_path = Path(project_path)
if project_path.resolve() != check_for_project(project_path):
packages_path = _get_data_folder().joinpath("packages")
if not project_path.is_absolute() and packages_path.joinpath(project_path).exists():
project_path = packages_path.joinpath(project_path)
else:
project_path = None
if project_path is None:
raise ProjectNotFound("Could not find Brownie project")
project_path = Path(project_path).resolve()
if name is None:
name = project_path.name
if not name.lower().endswith("project"):
name += " project"
if not name[0].isalpha():
raise BadProjectName("Project must start with an alphabetic character")
name = "".join(i for i in name.title() if i.isalnum())
for loaded_project in _loaded_projects:
if loaded_project._name == name:
if raise_if_loaded:
raise ProjectAlreadyLoaded("There is already a project loaded with this name")
return loaded_project
# paths
_create_folders(project_path)
_add_to_sys_path(project_path)
# load sources and build
return Project(name, project_path, compile=compile)
def _install_dependencies(path: pathlib.Path) -> None:
for package_id in _load_project_dependencies(path):
try:
install_package(package_id)
except FileExistsError:
pass
def install_package(package_id: str) -> str:
"""
Install a package.
Arguments
---------
package_id : str
Package ID
Returns
-------
str
ID of the installed package.
"""
return _install_from_github(package_id)
def _maybe_retrieve_github_auth() -> Dict[str, str]:
"""Returns appropriate github authorization headers.
Otherwise returns an empty dict if no auth token is present.
"""
if token := os.getenv("GITHUB_TOKEN"):
auth = b64encode(token.encode()).decode()
return {"Authorization": f"Basic {auth}"}
return {}
def _install_from_github(package_id: str) -> str:
try:
path, version = package_id.split("@", 1)
org, repo = path.split("/")
except ValueError:
raise ValueError(
"Invalid package ID. Must be given as [ORG]/[REPO]@[VERSION]"
"\ne.g. 'OpenZeppelin/openzeppelin-contracts@v2.5.0'"
) from None
base_install_path = _get_data_folder().joinpath("packages")
install_path = base_install_path.joinpath(f"{org}")
install_path.mkdir(exist_ok=True)
install_path = install_path.joinpath(f"{repo}@{version}")
if install_path.exists():
raise FileExistsError("Package is already installed")
headers = REQUEST_HEADERS.copy()
headers.update(_maybe_retrieve_github_auth())
if regex_match(r"^[0-9a-f]+$", version):
download_url = f"https://api.github.com/repos/{org}/{repo}/zipball/{version}"
else:
download_url = _get_download_url_from_tag(org, repo, version, headers)
existing = list(install_path.parent.iterdir())
# Some versions contain special characters and github api seems to display url without
# encoding them.
# It results in a ConnectionError exception because the actual download url is encoded.
# In this case we try to sanitize the version in url and download again.
try:
_stream_download(download_url, str(install_path.parent), headers)
except ConnectionError:
download_url = (
f"https://api.github.com/repos/{org}/{repo}/zipball/refs/tags/{quote(version)}"
)
_stream_download(download_url, str(install_path.parent), headers)
installed = next(i for i in install_path.parent.iterdir() if i not in existing)
shutil.move(installed, install_path)
try:
if not install_path.joinpath("brownie-config.yaml").exists():
brownie_config: Dict = {"project_structure": {}}
contract_paths = {
i.relative_to(install_path).parts[0]
for i in mapcat(install_path.glob, ("**/*.sol", "**/*.vy"))
}
if not contract_paths:
raise InvalidPackage(f"{package_id} does not contain any .sol or .vy files")
if install_path.joinpath("contracts").is_dir():
brownie_config["project_structure"]["contracts"] = "contracts"
elif len(contract_paths) == 1:
brownie_config["project_structure"]["contracts"] = contract_paths.pop()
else:
raise Exception(
f"{package_id} has no `contracts/` subdirectory, and "
"multiple directories containing source files"
)
with install_path.joinpath("brownie-config.yaml").open("w") as fp:
yaml.dump(brownie_config, fp)
Path.touch(install_path / ".env")
project = load(install_path)
project.close()
except InvalidPackage:
shutil.rmtree(install_path)
raise
except Exception as e:
notify(
"WARNING",
f"Unable to compile {package_id} due to a {type(e).__name__} - you may still be able to"
" import sources from the package, but will be unable to load the package directly.\n",
)
return f"{org}/{repo}@{version}"
def _get_download_url_from_tag(org: str, repo: str, version: str, headers: dict) -> str:
response = requests.get(
f"https://api.github.com/repos/{org}/{repo}/tags?per_page=100", headers=headers
)
status_code = response.status_code
if status_code != 200:
message = response.json()["message"]
msg = f"Status {status_code} when getting package versions from Github: '{message}'"
if status_code in {403, 404}:
msg += (
"\n\nMissing or forbidden.\n"
"If this issue persists, generate a Github API token and store"
" it as the environment variable `GITHUB_TOKEN`:\n"
"https://github.blog/2013-05-16-personal-api-tokens/"
)
raise ConnectionError(msg)
data = response.json()
if not data:
raise ValueError("Github repository has no tags set")
org, repo = data[0]["zipball_url"].split("/")[3:5]
tags = [i["name"].lstrip("v") for i in data]
if version not in tags:
raise ValueError(
"Invalid version for this package. Available versions are:\n" + ", ".join(tags)
) from None
return next(i["zipball_url"] for i in data if i["name"].lstrip("v") == version)
def _create_gitfiles(project_path: pathlib.Path) -> None:
gitignore = project_path.joinpath(".gitignore")
if not gitignore.exists():
with gitignore.open("w") as fp:
fp.write(GITIGNORE)
gitattributes = project_path.joinpath(".gitattributes")
if not gitattributes.exists():
with gitattributes.open("w") as fp:
fp.write(GITATTRIBUTES)
def _create_folders(project_path: pathlib.Path) -> None:
structure = _load_project_structure_config(project_path)
for path in structure.values():
project_path.joinpath(path).mkdir(exist_ok=True)
build_path = project_path.joinpath(structure["build"])
for path in BUILD_FOLDERS:
build_path.joinpath(path).mkdir(exist_ok=True)
def _add_to_sys_path(project_path: pathlib.Path) -> None:
project_path_string = str(project_path)
if project_path_string in sys.path:
return
sys.path.insert(0, project_path_string)
def _compare_settings(left: Dict, right: Dict) -> bool:
return any(v and not isinstance(v, dict) and v != right.get(k) for k, v in left.items())
def _normalize_solidity_version(version: str) -> str:
return version.split("+")[0]
def _solidity_compiler_equal(config: SolcConfig, build: CompilerConfig) -> bool:
return (
config["version"] is None
or _normalize_solidity_version(config["version"])
== _normalize_solidity_version(build["version"])
) and config["optimizer"] == build["optimizer"]
def _vyper_compiler_equal(config: VyperConfig, build: CompilerConfig) -> bool:
return config["version"] is None or config["version"] == build["version"]
def _load_sources(project_path: pathlib.Path, subfolder: str, allow_json: bool) -> Dict:
contract_sources: Dict = {}
suffixes: Tuple = (".sol", ".vy")
if allow_json:
suffixes = suffixes + (".json",)
# one day this will be a beautiful plugin system
hooks: Optional[ModuleType] = None
if project_path.joinpath("brownie_hooks.py").exists():
hooks = import_module("brownie_hooks")
for path in project_path.glob(f"{subfolder}/**/*"):
if path.suffix not in suffixes:
continue
if next((i for i in path.relative_to(project_path).parts if i.startswith("_")), False):
continue
with path.open(encoding="utf-8") as fp:
source = fp.read()
if hasattr(hooks, "brownie_load_source"):
source = hooks.brownie_load_source(path, source)
path_str: str = path.relative_to(project_path).as_posix()
contract_sources[path_str] = source
return contract_sources
def _stream_download(
download_url: str, target_path: str, headers: Dict[str, str] = REQUEST_HEADERS
) -> None:
response = requests.get(download_url, stream=True, headers=headers)
if response.status_code == 404:
raise ConnectionError(
f"404 error when attempting to download from {download_url} - "
"are you sure this is a valid mix? https://github.com/brownie-mix"
)
if response.status_code != 200:
raise ConnectionError(
f"Received status code {response.status_code} when attempting "
f"to download from {download_url}"
)
total_size = int(response.headers.get("content-length", 0))
progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
content = bytes()
for data in response.iter_content(1024, decode_unicode=True):
progress_bar.update(len(data))
content += data
progress_bar.close()
with zipfile.ZipFile(BytesIO(content)) as zf:
zf.extractall(target_path)
def _get_mix_default_branch(mix_name: str, headers: Dict[str, str] = REQUEST_HEADERS) -> str:
"""Get the default branch for a brownie-mix repository.
Arguments
---------
mix_name : str
Name of a brownie-mix repository without -mix appended.
Returns
-------
str
The default branch name on github.
"""
REPO_GH_API = f"https://api.github.com/repos/brownie-mix/{mix_name}-mix"
r = requests.get(REPO_GH_API, headers=headers)
if r.status_code != 200:
status, repo, message = r.status_code, f"brownie-mix/{mix_name}", r.json()["message"]
msg = f"Status {status} when retrieving repo {repo} information from GHAPI: '{message}'"
if r.status_code in {403, 404}:
msg_lines = (
msg,
"\n\nMissing or forbidden.\n",
"If this issue persists, generate a Github API token and store",
" it as the environment variable `GITHUB_TOKEN`:\n",
"https://github.blog/2013-05-16-personal-api-tokens/",
)
msg = "".join(msg_lines)
raise ConnectionError(msg)
elif "default_branch" not in r.json():
msg = f"API results did not include {mix_name}'s default branch"
raise KeyError(msg)
return r.json()["default_branch"]
| TempProject |
python | walkccc__LeetCode | solutions/2050. Parallel Courses III/2050.py | {
"start": 0,
"end": 665
} | class ____:
def minimumTime(
self,
n: int,
relations: list[list[int]],
time: list[int],
) -> int:
graph = [[] for _ in range(n)]
inDegrees = [0] * n
dist = time.copy()
# Build the graph.
for a, b in relations:
u = a - 1
v = b - 1
graph[u].append(v)
inDegrees[v] += 1
# Perform topological sorting.
q = collections.deque([i for i, d in enumerate(inDegrees) if d == 0])
while q:
u = q.popleft()
for v in graph[u]:
dist[v] = max(dist[v], dist[u] + time[v])
inDegrees[v] -= 1
if inDegrees[v] == 0:
q.append(v)
return max(dist)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 14206,
"end": 15038
} | class ____(Metafield):
"""
{
orders(query: "updated_at:>='2023-02-07T00:00:00+00:00' AND updated_at:<='2023-12-04T00:00:00+00:00'", sortKey: UPDATED_AT) {
edges {
node {
id
metafields {
edges {
node {
id
namespace
value
key
description
createdAt
updatedAt
type
}
}
}
}
}
}
}
"""
type = MetafieldType.ORDERS
| MetafieldOrder |
python | cython__cython | Cython/Build/Tests/TestCythonizeArgsParser.py | {
"start": 313,
"end": 18614
} | class ____(TestCase):
def setUp(self):
TestCase.setUp(self)
self.parse_args = lambda x, parser=create_args_parser() : parse_args_raw(parser, x)
def are_default(self, options, skip):
# empty containers
empty_containers = ['directives', 'compile_time_env', 'options', 'excludes']
are_none = ['language_level', 'annotate', 'build', 'build_inplace', 'force', 'quiet', 'lenient', 'keep_going', 'no_docstrings']
for opt_name in empty_containers:
if len(getattr(options, opt_name))!=0 and (opt_name not in skip):
self.assertEqual(opt_name,"", msg="For option "+opt_name)
return False
for opt_name in are_none:
if (getattr(options, opt_name) is not None) and (opt_name not in skip):
self.assertEqual(opt_name,"", msg="For option "+opt_name)
return False
if options.parallel!=parallel_compiles and ('parallel' not in skip):
return False
return True
# testing directives:
def test_directive_short(self):
options, args = self.parse_args(['-X', 'cdivision=True'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']))
self.assertEqual(options.directives['cdivision'], True)
def test_directive_long(self):
options, args = self.parse_args(['--directive', 'cdivision=True'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']))
self.assertEqual(options.directives['cdivision'], True)
def test_directive_multiple(self):
options, args = self.parse_args(['-X', 'cdivision=True', '-X', 'c_string_type=bytes'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']))
self.assertEqual(options.directives['cdivision'], True)
self.assertEqual(options.directives['c_string_type'], 'bytes')
def test_directive_multiple_v2(self):
options, args = self.parse_args(['-X', 'cdivision=True,c_string_type=bytes'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']))
self.assertEqual(options.directives['cdivision'], True)
self.assertEqual(options.directives['c_string_type'], 'bytes')
def test_directive_value_yes(self):
options, args = self.parse_args(['-X', 'cdivision=YeS'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']))
self.assertEqual(options.directives['cdivision'], True)
def test_directive_value_no(self):
options, args = self.parse_args(['-X', 'cdivision=no'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']))
self.assertEqual(options.directives['cdivision'], False)
def test_directive_value_invalid(self):
with self.assertRaises(ValueError) as context:
options, args = self.parse_args(['-X', 'cdivision=sadfasd'])
def test_directive_key_invalid(self):
with self.assertRaises(ValueError) as context:
options, args = self.parse_args(['-X', 'abracadabra'])
def test_directive_no_value(self):
with self.assertRaises(ValueError) as context:
options, args = self.parse_args(['-X', 'cdivision'])
def test_directives_types(self):
directives = [
('auto_pickle', True),
('c_string_type', 'bytearray'),
('c_string_type', 'bytes'),
('c_string_type', 'str'),
('c_string_type', 'bytearray'),
('c_string_type', 'unicode'),
('c_string_encoding', 'ascii'),
('language_level', '2'),
('language_level', '3'),
#('language_level', '3str'),
('set_initial_path', 'my_initial_path'),
]
for key, value in directives:
cmd = '{key}={value}'.format(key=key, value=str(value))
options, args = self.parse_args(['-X', cmd])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['directives']), msg = "Error for option: "+cmd)
if value == 'unicode':
value = 'str'
self.assertEqual(options.directives[key], value, msg = "Error for option: "+cmd)
def test_directives_wrong(self):
directives = [
('auto_pickle', 42), # for bool type
('auto_pickle', 'NONONO'), # for bool type
('c_string_type', 'bites'),
#('c_string_encoding', 'a'),
#('language_level', 4),
]
for key, value in directives:
cmd = '{key}={value}'.format(key=key, value=str(value))
with self.assertRaises(ValueError, msg = "Error for option: "+cmd) as context:
options, args = self.parse_args(['-X', cmd])
def test_compile_time_env_short(self):
options, args = self.parse_args(['-E', 'MYSIZE=10'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['compile_time_env']))
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
def test_compile_time_env_long(self):
options, args = self.parse_args(['--compile-time-env', 'MYSIZE=10'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['compile_time_env']))
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
def test_compile_time_env_multiple(self):
options, args = self.parse_args(['-E', 'MYSIZE=10', '-E', 'ARRSIZE=11'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['compile_time_env']))
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
self.assertEqual(options.compile_time_env['ARRSIZE'], 11)
def test_compile_time_env_multiple_v2(self):
options, args = self.parse_args(['-E', 'MYSIZE=10,ARRSIZE=11'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['compile_time_env']))
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
self.assertEqual(options.compile_time_env['ARRSIZE'], 11)
#testing options
def test_option_short(self):
options, args = self.parse_args(['-s', 'docstrings=True'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_long(self):
options, args = self.parse_args(['--option', 'docstrings=True'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_multiple(self):
options, args = self.parse_args(['-s', 'docstrings=True', '-s', 'buffer_max_dims=8'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
self.assertEqual(options.options['buffer_max_dims'], True) # really?
def test_option_multiple_v2(self):
options, args = self.parse_args(['-s', 'docstrings=True,buffer_max_dims=8'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
self.assertEqual(options.options['buffer_max_dims'], True) # really?
def test_option_value_yes(self):
options, args = self.parse_args(['-s', 'docstrings=YeS'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_value_4242(self):
options, args = self.parse_args(['-s', 'docstrings=4242'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_value_0(self):
options, args = self.parse_args(['-s', 'docstrings=0'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], False)
def test_option_value_emptystr(self):
options, args = self.parse_args(['-s', 'docstrings='])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_value_a_str(self):
options, args = self.parse_args(['-s', 'docstrings=BB'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_value_no(self):
options, args = self.parse_args(['-s', 'docstrings=nO'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], False)
def test_option_no_value(self):
options, args = self.parse_args(['-s', 'docstrings'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['docstrings'], True)
def test_option_any_key(self):
options, args = self.parse_args(['-s', 'abracadabra'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['options']))
self.assertEqual(options.options['abracadabra'], True)
def test_language_level_2(self):
options, args = self.parse_args(['-2'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['language_level']))
self.assertEqual(options.language_level, 2)
def test_language_level_3(self):
options, args = self.parse_args(['-3'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['language_level']))
self.assertEqual(options.language_level, 3)
def test_language_level_3str(self):
options, args = self.parse_args(['--3str'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['language_level']))
self.assertEqual(options.language_level, 3)
def test_annotate_short(self):
options, args = self.parse_args(['-a'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['annotate']))
self.assertEqual(options.annotate, 'default')
def test_annotate_long(self):
options, args = self.parse_args(['--annotate'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['annotate']))
self.assertEqual(options.annotate, 'default')
def test_annotate_fullc(self):
options, args = self.parse_args(['--annotate-fullc'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['annotate']))
self.assertEqual(options.annotate, 'fullc')
def test_annotate_and_positional(self):
options, args = self.parse_args(['-a', 'foo.pyx'])
self.assertEqual(args, ['foo.pyx'])
self.assertTrue(self.are_default(options, ['annotate']))
self.assertEqual(options.annotate, 'default')
def test_annotate_and_optional(self):
options, args = self.parse_args(['-a', '--3str'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['annotate', 'language_level']))
self.assertEqual(options.annotate, 'default')
self.assertEqual(options.language_level, 3)
def test_exclude_short(self):
options, args = self.parse_args(['-x', '*.pyx'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['excludes']))
self.assertTrue('*.pyx' in options.excludes)
def test_exclude_long(self):
options, args = self.parse_args(['--exclude', '*.pyx'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['excludes']))
self.assertTrue('*.pyx' in options.excludes)
def test_exclude_multiple(self):
options, args = self.parse_args(['--exclude', '*.pyx', '--exclude', '*.py', ])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['excludes']))
self.assertEqual(options.excludes, ['*.pyx', '*.py'])
def test_build_short(self):
options, args = self.parse_args(['-b'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['build']))
self.assertEqual(options.build, True)
def test_build_long(self):
options, args = self.parse_args(['--build'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['build']))
self.assertEqual(options.build, True)
def test_inplace_short(self):
options, args = self.parse_args(['-i'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['build_inplace']))
self.assertEqual(options.build_inplace, True)
def test_inplace_long(self):
options, args = self.parse_args(['--inplace'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['build_inplace']))
self.assertEqual(options.build_inplace, True)
def test_parallel_short(self):
options, args = self.parse_args(['-j', '42'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['parallel']))
self.assertEqual(options.parallel, 42)
def test_parallel_long(self):
options, args = self.parse_args(['--parallel', '42'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['parallel']))
self.assertEqual(options.parallel, 42)
def test_force_short(self):
options, args = self.parse_args(['-f'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['force']))
self.assertEqual(options.force, True)
def test_force_long(self):
options, args = self.parse_args(['--force'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['force']))
self.assertEqual(options.force, True)
def test_quite_short(self):
options, args = self.parse_args(['-q'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['quiet']))
self.assertEqual(options.quiet, True)
def test_quite_long(self):
options, args = self.parse_args(['--quiet'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['quiet']))
self.assertEqual(options.quiet, True)
def test_lenient_long(self):
options, args = self.parse_args(['--lenient'])
self.assertTrue(self.are_default(options, ['lenient']))
self.assertFalse(args)
self.assertEqual(options.lenient, True)
def test_keep_going_short(self):
options, args = self.parse_args(['-k'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['keep_going']))
self.assertEqual(options.keep_going, True)
def test_keep_going_long(self):
options, args = self.parse_args(['--keep-going'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['keep_going']))
self.assertEqual(options.keep_going, True)
def test_no_docstrings_long(self):
options, args = self.parse_args(['--no-docstrings'])
self.assertFalse(args)
self.assertTrue(self.are_default(options, ['no_docstrings']))
self.assertEqual(options.no_docstrings, True)
def test_file_name(self):
options, args = self.parse_args(['file1.pyx', 'file2.pyx'])
self.assertEqual(len(args), 2)
self.assertEqual(args[0], 'file1.pyx')
self.assertEqual(args[1], 'file2.pyx')
self.assertTrue(self.are_default(options, []))
def test_option_first(self):
options, args = self.parse_args(['-i', 'file.pyx'])
self.assertEqual(args, ['file.pyx'])
self.assertEqual(options.build_inplace, True)
self.assertTrue(self.are_default(options, ['build_inplace']))
def test_file_inbetween(self):
options, args = self.parse_args(['-i', 'file.pyx', '-a'])
self.assertEqual(args, ['file.pyx'])
self.assertEqual(options.build_inplace, True)
self.assertEqual(options.annotate, 'default')
self.assertTrue(self.are_default(options, ['build_inplace', 'annotate']))
def test_option_trailing(self):
options, args = self.parse_args(['file.pyx', '-i'])
self.assertEqual(args, ['file.pyx'])
self.assertEqual(options.build_inplace, True)
self.assertTrue(self.are_default(options, ['build_inplace']))
def test_interspersed_positional(self):
options, sources = self.parse_args([
'file1.pyx', '-a',
'file2.pyx'
])
self.assertEqual(sources, ['file1.pyx', 'file2.pyx'])
self.assertEqual(options.annotate, 'default')
self.assertTrue(self.are_default(options, ['annotate']))
def test_interspersed_positional2(self):
options, sources = self.parse_args([
'file1.pyx', '-a',
'file2.pyx', '-a', 'file3.pyx'
])
self.assertEqual(sources, ['file1.pyx', 'file2.pyx', 'file3.pyx'])
self.assertEqual(options.annotate, 'default')
self.assertTrue(self.are_default(options, ['annotate']))
def test_interspersed_positional3(self):
options, sources = self.parse_args([
'-f', 'f1', 'f2', '-a',
'f3', 'f4', '-a', 'f5'
])
self.assertEqual(sources, ['f1', 'f2', 'f3', 'f4', 'f5'])
self.assertEqual(options.annotate, 'default')
self.assertEqual(options.force, True)
self.assertTrue(self.are_default(options, ['annotate', 'force']))
def test_wrong_option(self):
old_stderr = sys.stderr
stderr = sys.stderr = StringIO()
try:
self.assertRaises(SystemExit, self.parse_args,
['--unknown-option']
)
finally:
sys.stderr = old_stderr
self.assertTrue(stderr.getvalue())
| TestCythonizeArgsParser |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 35337,
"end": 36508
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"keywords": 9000, "dynamic": ["keywords"]}})
with pytest.raises(
ValueError,
match="Metadata field `keywords` cannot be both statically defined and listed in field `project.dynamic`",
):
_ = metadata.core.keywords
def test_not_array(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"keywords": 10}})
with pytest.raises(TypeError, match="Field `project.keywords` must be an array"):
_ = metadata.core.keywords
def test_entry_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"keywords": [10]}})
with pytest.raises(TypeError, match="Keyword #1 of field `project.keywords` must be a string"):
_ = metadata.core.keywords
def test_correct(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"keywords": ["foo", "foo", "bar"]}})
assert metadata.core.keywords == metadata.core.keywords == ["bar", "foo"]
| TestKeywords |
python | tiangolo__fastapi | tests/test_duplicate_models_openapi.py | {
"start": 195,
"end": 2159
} | class ____(BaseModel):
c: Model
d: Model2
@app.get("/", response_model=Model3)
def f():
return {"c": {}, "d": {"a": {}}}
client = TestClient(app)
def test_get_api_route():
response = client.get("/")
assert response.status_code == 200, response.text
assert response.json() == {"c": {}, "d": {"a": {}}}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"get": {
"summary": "F",
"operationId": "f__get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Model3"}
}
},
}
},
}
}
},
"components": {
"schemas": {
"Model": {"title": "Model", "type": "object", "properties": {}},
"Model2": {
"title": "Model2",
"required": ["a"],
"type": "object",
"properties": {"a": {"$ref": "#/components/schemas/Model"}},
},
"Model3": {
"title": "Model3",
"required": ["c", "d"],
"type": "object",
"properties": {
"c": {"$ref": "#/components/schemas/Model"},
"d": {"$ref": "#/components/schemas/Model2"},
},
},
}
},
}
| Model3 |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_organization_uptime_alert_index.py | {
"start": 377,
"end": 3905
} | class ____(OrganizationUptimeAlertIndexBaseEndpointTest):
method = "get"
def check_valid_response(self, response, expected_detectors):
assert [
serialize(uptime_alert, serializer=UptimeDetectorSerializer())
for uptime_alert in expected_detectors
] == response.data
def test(self) -> None:
alert_1 = self.create_uptime_detector(name="test1")
alert_2 = self.create_uptime_detector(name="test2")
resp = self.get_success_response(self.organization.slug)
self.check_valid_response(resp, [alert_1, alert_2])
def test_search_by_url(self) -> None:
self.create_uptime_detector()
santry_monitor = self.create_uptime_detector(
uptime_subscription=self.create_uptime_subscription(url="https://santry.com")
)
response = self.get_success_response(self.organization.slug, query="santry")
self.check_valid_response(response, [santry_monitor])
def test_environment_filter(self) -> None:
env = self.create_environment()
self.create_uptime_detector()
env_detector = self.create_uptime_detector(env=env)
response = self.get_success_response(self.organization.slug, environment=[env.name])
self.check_valid_response(response, [env_detector])
def test_owner_filter(self) -> None:
user_1 = self.create_user()
user_2 = self.create_user()
team_1 = self.create_team()
team_2 = self.create_team()
self.create_team_membership(team_2, user=self.user)
uptime_a = self.create_uptime_detector(owner=user_1)
uptime_b = self.create_uptime_detector(owner=user_2)
uptime_c = self.create_uptime_detector(owner=team_1)
uptime_d = self.create_uptime_detector(owner=team_2)
uptime_e = self.create_uptime_detector(owner=None)
# Monitor by user
response = self.get_success_response(self.organization.slug, owner=[f"user:{user_1.id}"])
self.check_valid_response(response, [uptime_a])
# Monitors by users and teams
response = self.get_success_response(
self.organization.slug,
owner=[f"user:{user_1.id}", f"user:{user_2.id}", f"team:{team_1.id}"],
)
self.check_valid_response(response, [uptime_a, uptime_b, uptime_c])
# myteams
response = self.get_success_response(
self.organization.slug,
owner=["myteams"],
)
self.check_valid_response(response, [uptime_d])
# unassigned monitors
response = self.get_success_response(
self.organization.slug,
owner=["unassigned", f"user:{user_1.id}"],
)
self.check_valid_response(response, [uptime_a, uptime_e])
# Invalid user ID
response = self.get_success_response(
self.organization.slug,
owner=["user:12345"],
)
self.check_valid_response(response, [])
def test_only_returns_active_detectors(self) -> None:
active_detector = self.create_uptime_detector(name="active", status=ObjectStatus.ACTIVE)
self.create_uptime_detector(name="pending_deletion", status=ObjectStatus.PENDING_DELETION)
self.create_uptime_detector(
name="deletion_in_progress", status=ObjectStatus.DELETION_IN_PROGRESS
)
response = self.get_success_response(self.organization.slug)
self.check_valid_response(response, [active_detector])
| OrganizationUptimeAlertIndexEndpointTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_1.py | {
"start": 367,
"end": 409
} | class ____(BaseModel):
x: pathlib.Path
| C |
python | plotly__plotly.py | plotly/graph_objs/funnel/_insidetextfont.py | {
"start": 233,
"end": 17179
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnel"
_path_str = "funnel.insidetextfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Insidetextfont object
Sets the font used for `text` lying inside the bar.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnel.Insidetextfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Insidetextfont
"""
super().__init__("insidetextfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnel.Insidetextfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.Insidetextfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Insidetextfont |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 184179,
"end": 221606
} | class ____(BaseScheduling):
# Subclass CppKernelProxy to customize codegen without copying codegen_node().
# Use kernel_proxy_cls to inject custom proxies in CppScheduling subclasses.
# Avoid duplicating codegen_node() just to swap in a custom kernel proxy class.
kernel_proxy_cls: type[CppKernelProxy] = CppKernelProxy
# ctypes limits the number of args to 1024, refer to:
# https://github.com/python/cpython/commit/a285af7e626d1b81cf09f8b2bf7656f100bc1237
# We set a conservative threshold here.
MAX_FUSED_KERNEL_ARGS_NUM = 500
backend_features = OrderedSet(
[
BackendFeature.INPLACE_BUFFERS,
BackendFeature.REDUCE_TO_SINGLE_ELEMENT,
]
)
@classmethod
def get_backend_features(cls, device: torch.device) -> OrderedSet[BackendFeature]:
return cls.backend_features
def __init__(self, scheduler):
super().__init__(scheduler)
if scheduler:
self.reset_kernel_group()
self._ready_to_flush = False
def _set_flush_status(self, status: bool):
self._ready_to_flush = status
def group_fn(self, sizes):
return tuple(tuple(map(V.graph.sizevars.simplify, s)) for s in sizes)
def reset_kernel_group(self):
self.kernel_group = KernelGroup()
def fuse(self, node1, node2):
if node1.is_foreach() or node2.is_foreach():
return ForeachKernelSchedulerNode.fuse(node1, node2)
elif node1.is_template():
assert not node2.is_template()
return FusedSchedulerNode.fuse(node1, node2)
else:
if (
self._why_fuse_nodes(node1, node2)
== ReasonFusedNodes.COMPATIBLE_RANGES_NO_REDUCTION
):
assert isinstance(node1, (SchedulerNode, FusedSchedulerNode))
assert isinstance(node2, (SchedulerNode, FusedSchedulerNode))
_, (vars1, reduce1) = node1.group
_, (vars2, reduce2) = node2.group
assert reduce1 == () and reduce2 == (), (reduce1, reduce2)
def get_indexing_ranges_exprs(node):
if isinstance(node, FusedSchedulerNode):
assert len(node.snodes) > 0, node.snodes
var_ranges = None
indexing_exprs = OrderedSet[Any]()
for snode in node.snodes:
v, exprs = get_indexing_ranges_exprs(snode)
if var_ranges is None:
var_ranges = v
assert var_ranges == v, (var_ranges, v, node.snodes)
indexing_exprs.update(exprs)
return var_ranges, list(indexing_exprs)
else:
assert isinstance(node, SchedulerNode)
comp_buffer = node.node
assert isinstance(comp_buffer, ir.ComputedBuffer)
_, body, _ = comp_buffer.get_default_sizes_body()
return body.var_ranges, list(body.indexing_exprs.values())
node_to_recomp = node1 if len(vars1) < len(vars2) else node2
assert isinstance(node_to_recomp, SchedulerNode)
ref_node = node2 if len(vars1) < len(vars2) else node1
ref_indexing_constraints = get_indexing_ranges_exprs(ref_node)
node_to_recomp.recompute_size_and_body(
extra_indexing_constraints=ref_indexing_constraints
)
_, (vars1, _) = node1.group
_, (vars2, _) = node2.group
if vars1 == vars2:
return FusedSchedulerNode.fuse(node1, node2)
# recompute ref_node if its ranges are also changed
node_to_recomp_indexing_constraints = get_indexing_ranges_exprs(
node_to_recomp
)
if isinstance(ref_node, SchedulerNode):
ref_node.recompute_size_and_body(
extra_indexing_constraints=node_to_recomp_indexing_constraints
)
else:
assert isinstance(ref_node, FusedSchedulerNode)
for snode in ref_node.snodes:
assert isinstance(snode, SchedulerNode)
snode.recompute_size_and_body(
extra_indexing_constraints=node_to_recomp_indexing_constraints
)
ref_node = FusedSchedulerNode(ref_node.scheduler, ref_node.snodes)
_, (vars1, _) = node1.group
_, (vars2, _) = node2.group
assert vars1 == vars2, (vars1, vars2)
return FusedSchedulerNode.fuse(node1, node2)
elif self.can_fuse_vertical_outer_loop(node1, node2):
return OuterLoopFusedSchedulerNode.fuse(
node1, node2, self._get_outer_loop_fusion_depth(node1, node2)
)
else:
return FusedSchedulerNode.fuse(node1, node2)
def _why_fuse_nodes(self, node1, node2) -> Optional[ReasonFusedNodes]:
_, (vars1, reduce1) = node1.group
_, (vars2, reduce2) = node2.group
if vars1 == vars2 and reduce1 == reduce2:
return ReasonFusedNodes.SAME_VARS_REDUCE
if reduce1 == () and vars1 == vars2 + reduce2:
return ReasonFusedNodes.COMPATIBLE_REDUCTION
if self._can_fuse_nodes_with_compatible_ranges(node1, node2):
return ReasonFusedNodes.COMPATIBLE_RANGES_NO_REDUCTION
# TODO(jansel): allow fusion pointwise (vars1, ()) suffix?
return None
def _can_fuse_nodes_with_compatible_ranges(self, node1, node2):
# Here we try to fuse SchedulerNode/FusedSchedulerNode with compatible ranges
# e.g. (s0, s1, s2) and (s0 * s1 * s2)
_, (vars1, reduce1) = node1.group
_, (vars2, reduce2) = node2.group
c1 = reduce1 == () and reduce2 == ()
c2 = math.prod(vars1) == math.prod(vars2)
c3 = len(vars1) == 1 or len(vars2) == 1
if not (c1 and c2 and c3):
return False
node_to_recomp = node1 if len(vars1) < len(vars2) else node2
ref_node = node2 if len(vars1) < len(vars2) else node1
# We can not recompute sizes and body for nodes other than SchedulerNode
# TODO: we can extend fusion support with compatible ranges for FusedSchedulerNode
if isinstance(node_to_recomp, FusedSchedulerNode):
return False
# It may happen that node1 and node2 compatible number of elements
# but different original ranges, for example:
# {d0: s0, d1: s1, d2: s2} vs {d0: s0*s1*s2}
# See https://github.com/pytorch/pytorch/pull/120077/files#r1500427848 for more details
# TODO: we can fix if it allows us to CSE at least one of the variables
assert isinstance(node_to_recomp, SchedulerNode)
if isinstance(node_to_recomp.node, ir.TemplateBuffer):
return False
assert isinstance(node_to_recomp.node, ir.ComputedBuffer)
# node.data.get_size() is a cheaper version of node.get_read_writes().var_ranges
# but without variable name
ranges2 = node_to_recomp.node.data.get_size()
ranges1 = None
if isinstance(ref_node, FusedSchedulerNode):
ranges_set = OrderedSet[tuple[Any, ...]]()
for snode in ref_node.snodes:
if isinstance(snode.node, ir.TemplateBuffer):
break
assert isinstance(snode.node, ir.ComputedBuffer)
ranges_set.add(tuple(snode.node.data.get_size()))
if len(ranges_set) != 1:
return False
ranges1 = list(next(iter(ranges_set)))
else:
assert isinstance(ref_node, SchedulerNode)
assert isinstance(ref_node.node, ir.ComputedBuffer)
ranges1 = ref_node.node.data.get_size() # type: ignore[assignment]
if ranges1 != ranges2:
return False
return True
def _can_fuse_horizontal_impl(self, node1, node2):
assert isinstance(node1, (FusedSchedulerNode, SchedulerNode))
assert isinstance(node2, (FusedSchedulerNode, SchedulerNode))
if any(
isinstance(node, OuterLoopFusedSchedulerNode) for node in (node1, node2)
):
return False
return self._why_fuse_nodes(node1, node2) is not None
def can_fuse_horizontal(self, node1, node2):
if node1.is_template() or node2.is_template():
return False
if (
len(node1.get_nodes()) + len(node2.get_nodes())
> config.cpp.max_horizontal_fusion_size
):
return False
return self._can_fuse_horizontal_impl(node1, node2)
def can_fuse_multi_outputs_template(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
if template_buf := node1.get_template_node():
return (
isinstance(template_buf.layout, ir.MultiOutputLayout)
and isinstance(node2.node, ir.MultiOutput)
and len(node2.node.inputs) == 1
and node2.node.inputs[0].get_name() == template_buf.name # type: ignore[union-attr]
)
return False
def _get_outer_loop_fusion_depth(self, node1, node2):
DISABLE_OUTER_LOOP_FUSION = 0
if not all(
type(node)
in (OuterLoopFusedSchedulerNode, FusedSchedulerNode, SchedulerNode)
for node in (node1, node2)
):
return DISABLE_OUTER_LOOP_FUSION
_node1 = (
node1.get_outer_nodes()[-1]
if isinstance(node1, OuterLoopFusedSchedulerNode)
else node1
)
assert isinstance(_node1, (FusedSchedulerNode, SchedulerNode))
_node2 = (
node2.get_outer_nodes()[0]
if isinstance(node2, OuterLoopFusedSchedulerNode)
else node2
)
assert isinstance(_node2, (FusedSchedulerNode, SchedulerNode))
_, (vars1, reduce1) = _node1.group
_, (vars2, reduce2) = _node2.group
if vars1 == () and vars2 == () and reduce1 != () and reduce2 != ():
# Reduction only
return DISABLE_OUTER_LOOP_FUSION
if all(type(node) is OuterLoopFusedSchedulerNode for node in (node1, node2)):
return (
node1.outer_loop_fusion_depth
if node1.outer_loop_fusion_depth == node2.outer_loop_fusion_depth
else DISABLE_OUTER_LOOP_FUSION
)
outer_loop_fusion_depth = min(len(vars1), len(vars2))
if (
outer_loop_fusion_depth >= 1
and vars1[:outer_loop_fusion_depth] == vars2[:outer_loop_fusion_depth]
):
if any(
type(node) is OuterLoopFusedSchedulerNode for node in (node1, node2)
):
_compare_node = (
node1 if type(node1) is OuterLoopFusedSchedulerNode else node2
)
if _compare_node.outer_loop_fusion_depth == outer_loop_fusion_depth:
# Same outer loop fusion depth as prev nodes in OuterLoopFusedSchedulerNode
return outer_loop_fusion_depth
else:
return DISABLE_OUTER_LOOP_FUSION
else:
# First 2 nodes to generate OuterLoopFusedSchedulerNode
return outer_loop_fusion_depth
return DISABLE_OUTER_LOOP_FUSION
def can_fuse_vertical_outer_loop(self, node1, node2):
return (
not node1.is_template()
and not node2.is_template()
and node1.get_operation_names() & node2.ancestors
and not (
self._can_fuse_horizontal_impl(node1, node2)
and not node1.is_reduction()
)
and self._get_outer_loop_fusion_depth(node1, node2) >= 1
)
def get_fusion_pair_priority(self, node1, node2):
if self.can_fuse_vertical_outer_loop(node1, node2):
# Outer loop fusion with lower priority
return 1
else:
return 0
def can_fuse_vertical(self, node1, node2):
if node2.is_template():
# TODO(jgong5): support pre-op fusion with template
return False
if node1.is_template():
template_fusion_supported, _ = template_fusion_with_epilogues_supported(
node1, [node2]
)
return not node2.is_reduction() and template_fusion_supported
return (
self._can_fuse_horizontal_impl(node1, node2) and not node1.is_reduction()
) or self.can_fuse_vertical_outer_loop(node1, node2)
def try_loop_split(self, nodes: list[SchedulerNode]):
"""
Apply loop split optimization.
When one of the indexing_exprs contains a division, we eliminate the division by splitting the loop
to avoid non-contiguous loads, subject to the following conditions:
1. No reduction and no mudular index for all nodes.
2. The indexing_exprs of all nodes contain only one (or more, but all the same) division,
where the divisor is an integer and not too small (the divisor > 8), the dividend is
one of the iter_vars, and this var, i.e. the dimension that needs to be split, is
contiguous in all other indexing_exprs.
For example, if the node's var_ranges: {z0: 2, z1: 9216, z2: 960} and indexing_exprs:
{'index0': 8847360*z0 + 960*z1 + z2, 'index1': 32*z0 + (z2//30), 'index2': z2},
we will split z2 -> 30*z2 + z3, then the node's var_ranges will be changed to
{z0: 2, z1: 9216, z2: 32, z3: 30} and indexing_exprs will be changed to
{'index0': 8847360*z0 + 960*z1 + 30*z2 + z3, 'index1': 32*z0 + z2, 'index2': 30*z2 + z3}.
"""
# No reduction and no mudular
if any(
len(node.group[1][1]) != 0
or any(
expr.has(ModularIndexing) for expr in node._body.indexing_exprs.values()
)
for node in nodes
):
return nodes
split_var = None
split_number = None
num_div = 0
div_expr_ = None
match_div = False
matched_node = None
for node in nodes:
assert isinstance(node.node, ir.ComputedBuffer)
_, original_body, _ = node.node.get_default_sizes_body()
for name, expr in original_body.indexing_exprs.items():
if not isinstance(expr, sympy.Expr):
continue
for div_expr in expr.find(FloorDiv):
if (
any(div_expr.has(var) for var in original_body.iter_vars)
and div_expr != div_expr_
):
div_expr_ = div_expr
num_div += 1
if num_div > 1:
return nodes
if (
isinstance(div_expr.args[1], sympy.core.numbers.Integer)
and div_expr.args[0] in original_body.iter_vars
and name is not None
and all(
stride_at_vec_range(expr_, div_expr.args[0]) in (0, 1)
for name_, expr_ in original_body.indexing_exprs.items()
if name_ != name
)
and div_expr.args[1] > 8
):
split_var = div_expr.args[0]
split_number = div_expr.args[1]
match_div = True
matched_node = node
# Only one node contains a division, and the split dimension is contiguous in all other indexing_exprs.
if not match_div:
return nodes
extra_indexing_constraints = None
def loop_split(sizes, body, vars):
index_size, reduce_size = sizes
index_vars, reduce_vars = vars
split_idx = index_vars.index(split_var)
new_index_size = index_size.copy()
new_index_size[split_idx] = index_size[split_idx] // split_number
new_index_size.insert(split_idx + 1, split_number)
(new_index_vars, _), var_ranges = dependencies.index_vars_no_squeeze(
new_index_size, reduce_size, prefix="y"
)
iter_vars = new_index_vars.copy()
divisor_var = iter_vars.pop(split_idx + 1)
iter_vars[split_idx] = split_number * iter_vars[split_idx] + divisor_var
body = ir.LoopBody(
body, [iter_vars, reduce_vars], var_ranges, new_index_vars, reduce_vars
)
nonlocal extra_indexing_constraints
if not extra_indexing_constraints:
extra_indexing_constraints = (
body.var_ranges,
list(body.indexing_exprs.values()),
)
return (
(new_index_size, reduce_size),
body,
(new_index_vars, reduce_vars),
)
# Here decide the final loop order
for node in nodes:
if node == matched_node:
node.recompute_size_and_body(recompute_sizes_body_func=loop_split)
for node in nodes:
if node != matched_node:
node.recompute_size_and_body(
extra_indexing_constraints=extra_indexing_constraints,
recompute_sizes_body_func=loop_split,
)
return nodes
def codegen_outer_loop_node(
self,
node: OuterLoopFusedSchedulerNode,
):
"""
Generate the code for the outer loop fused scheduler node.
1. Codegen with fused outer loop: depends on the analysis of
the outer loop fused scheduler node, with or without the local buffer.
2. If failed, fallback to standard codegen.
"""
kernel_group = self.kernel_group
generated_cpp_vec_kernel_count = metrics.generated_cpp_vec_kernel_count
cpp_kernel_proxy_list: list[self.kernel_proxy_cls] = [] # type: ignore[name-defined]
nodes_list: list[list[SchedulerNode]] = []
assert isinstance(node, OuterLoopFusedSchedulerNode)
def try_outer_loop_fusion_with_local_buf(node: OuterLoopFusedSchedulerNode):
"""
Codegen code with fused outer loop and local Buffer.
"""
assert isinstance(node, OuterLoopFusedSchedulerNode)
cpp_kernel_proxy_list.clear()
nodes_list.clear()
def get_call_ranges(node: BaseSchedulerNode):
assert isinstance(node, (SchedulerNode, FusedSchedulerNode))
nodes: list[SchedulerNode] = node.get_nodes() # type: ignore[assignment]
_, (group, reduction_group) = max(
nodes, key=lambda x: int(x.is_reduction())
).group
call_ranges = tuple(group) + tuple(reduction_group)
return call_ranges
local_buffers: list[ir.Buffer] = []
# Map local buffer name to a list of global buffers
local_to_global_buffers: dict[str, list[ir.Buffer]] = {}
if all(
len(get_call_ranges(_node)) == node.outer_loop_fusion_depth + 1
for _node in node.get_outer_nodes()
):
# Ref to the typical case of local buffer in
# https://github.com/pytorch/pytorch/blob/1115a25c36340554442f28f9570abd42f0aface2/aten/src/ATen/native/cpu/SoftMaxKernel.cpp#L159 # noqa: B950
# where the buffer is with size of last dim and contiguous.
# Only support this typical case at first.
visited_scheduler_nodes: OrderedSet[str] = OrderedSet()
for scheduler_node in node.get_nodes():
# all users inside same OuterLoopFusedSchedulerNode
assert isinstance(scheduler_node, SchedulerNode)
visited_scheduler_nodes.add(scheduler_node.get_name())
if (
scheduler_node.is_reduction()
or len(scheduler_node.get_outputs()) != 1
):
continue
scheduler_buffer = scheduler_node.get_outputs()[0]
if all(
user.node in node.get_nodes() for user in scheduler_buffer.users
):
global_buffer = scheduler_buffer.node
assert isinstance(global_buffer, ir.ComputedBuffer)
global_buffer_layout = global_buffer.get_layout()
size_offset = node.outer_loop_fusion_depth - len(
get_call_ranges(scheduler_node)
)
def is_all_write_read_contiguous():
contiguous_index_expr = 0
stride = 1
for var, range in reversed(
# pyrefly: ignore [missing-attribute]
scheduler_node._body.var_ranges.items()
):
contiguous_index_expr += stride * var
stride *= range
# pyrefly: ignore [missing-attribute]
write_index_expr = scheduler_node._body.get_write_expr(
scheduler_buffer.get_name()
)
def is_contiguous_index(x):
return x == contiguous_index_expr
return is_contiguous_index(write_index_expr) and all(
isinstance(user.node, SchedulerNode)
and is_contiguous_index(
user.node._body.get_read_expr(
scheduler_buffer.get_name()
),
)
for user in scheduler_buffer.users
)
if not (
global_buffer_layout.is_contiguous()
and is_all_write_read_contiguous()
):
continue
# Local Buffer is a view of global buffer
local_buffer_stride: list[int] = []
stride = global_buffer_layout.stride[-1]
local_buffer_size = get_call_ranges(scheduler_node)[
size_offset:
]
for sz in reversed(local_buffer_size):
local_buffer_stride.insert(0, stride)
stride *= sz
local_buffer_layout = ir.FixedLayout(
global_buffer_layout.device,
global_buffer_layout.dtype,
local_buffer_size,
local_buffer_stride,
)
def try_share_local_buffer(local_buffer_layout, local_buffers):
for local_buf in local_buffers:
if local_buffer_layout == local_buf.layout and all(
all(
user.node.get_name() in visited_scheduler_nodes
for user in V.graph.scheduler.name_to_buf[
global_buffer.name
].users
)
for global_buffer in local_to_global_buffers[
local_buf.name
]
if global_buffer.name is not None
):
return local_buf
return None
local_buf_prefix = "local_buffer_data"
# Share existing local buffer
local_buffer_used = try_share_local_buffer(
local_buffer_layout, local_buffers
)
if not local_buffer_used:
# Create new local buffer
local_buffer_used = ir.Buffer(
name=f"{local_buf_prefix}_{len(local_buffers)}",
layout=local_buffer_layout,
)
local_buffers.append(local_buffer_used)
local_to_global_buffers[local_buffer_used.name] = [] # type: ignore[index]
# pyrefly: ignore [index-error]
local_to_global_buffers[local_buffer_used.name].append(
global_buffer,
)
with LocalBufferContext(kernel_group.args) as scope:
if len(local_buffers) > 0:
for local_buffer in local_buffers:
assert local_buffer.name is not None
scope.add_local_buffer(
local_buffer, local_to_global_buffers[local_buffer.name]
)
for _node in node.get_outer_nodes():
assert isinstance(_node, (FusedSchedulerNode, SchedulerNode))
cpp_kernel_proxy = self.kernel_proxy_cls(kernel_group)
cpp_kernel_proxy.codegen_nodes(_node.get_nodes()) # type: ignore[arg-type]
cpp_kernel_proxy_list.append(cpp_kernel_proxy)
nodes_list.append(_node.get_nodes()) # type: ignore[arg-type]
if not node.check_outer_fusion_loop_level_attr(
cpp_kernel_proxy_list, node.outer_loop_fusion_depth
):
for removed_buffer in scope.removed_buffers:
# Restore the removed buffers by this context before
# fallback to codegen without using Local Buffer
V.graph.removed_buffers.remove(removed_buffer)
return False
metrics.cpp_outer_loop_fused_inner_counts.append(
metrics.CppOuterLoopFusedCount(
len(cpp_kernel_proxy_list),
local_buffer_number=len(scope.local_buffers),
)
)
outer_fusion_cpp_kernel_proxy = node.merge_outer_fusion_kernels(
cpp_kernel_proxy_list,
)
kernel_group.finalize_kernel(
outer_fusion_cpp_kernel_proxy,
[*itertools.chain.from_iterable(nodes_list)],
)
return True
if not try_outer_loop_fusion_with_local_buf(node):
# Reset generated_cpp_vec_kernel_count to codegen again
metrics.generated_cpp_vec_kernel_count = generated_cpp_vec_kernel_count
cpp_kernel_proxy_list.clear()
nodes_list.clear()
# Similar as comment in
# https://github.com/pytorch/pytorch/blob/469383755fe416eb1c41fa724762ad3eaecdff07/torch/_inductor/codegen/cpp.py#L3269-L3272
# Kernels share the same global contexts like V.graph.wrapper_code, V.kernel.args.
with torch._inductor.config.patch(inplace_buffers=False):
for _node in node.get_outer_nodes():
assert isinstance(_node, (FusedSchedulerNode, SchedulerNode))
_nodes: list[SchedulerNode] = _node.get_nodes() # type: ignore[assignment]
cpp_kernel_proxy = self.kernel_proxy_cls(kernel_group)
cpp_kernel_proxy.codegen_nodes(_nodes)
kernel_group.finalize_kernel(cpp_kernel_proxy, _nodes)
def codegen_node(
self,
node: Union[OuterLoopFusedSchedulerNode, FusedSchedulerNode, SchedulerNode],
):
"""
Turn an set of pre-fused nodes into a C++ kernel.
"""
kernel_group = self.kernel_group
if isinstance(node, OuterLoopFusedSchedulerNode):
self.codegen_outer_loop_node(node)
else:
nodes: list[SchedulerNode] = node.get_nodes() # type: ignore[assignment]
nodes = self.try_loop_split(nodes)
cpp_kernel_proxy = self.kernel_proxy_cls(kernel_group)
cpp_kernel_proxy.codegen_nodes(nodes)
kernel_group.finalize_kernel(cpp_kernel_proxy, nodes)
args_num = self._get_scheduled_num_args()
if args_num > CppScheduling.MAX_FUSED_KERNEL_ARGS_NUM:
self._set_flush_status(True)
def is_cpp_template(self, node: BaseSchedulerNode) -> bool:
return isinstance(node, SchedulerNode) and isinstance(
node.node, ir.CppTemplateBuffer
)
def codegen_template(
self,
template_node: BaseSchedulerNode,
epilogue_nodes: Sequence[BaseSchedulerNode],
prologue_nodes: Sequence[BaseSchedulerNode],
):
"""
Codegen a CPP template, possibly with fused epilogues
"""
assert not prologue_nodes
# remove MultiOutput from epilogue_nodes
epilogue_nodes = [
epilogue_node
for epilogue_node in epilogue_nodes
if isinstance(epilogue_node, (SchedulerNode, FusedSchedulerNode))
]
# The counter cpp_templated_kernel_counter is used for verifying if a
# a templated kernel was successfully compiled in a UT
counters["inductor"]["cpp_templated_kernel_counter"] += 1
counters["inductor"]["cpp_epilogue_fusion_counter"] += len(epilogue_nodes)
assert self.is_cpp_template(template_node), (
"Template node passed to CppScheduler.codegen_template must be a SchedulerNode that wraps a CppTemplateBuffer"
)
template_node = cast(SchedulerNode, template_node)
_, (_, rnumel) = template_node.group
assert rnumel == ()
ctb: ir.CppTemplateBuffer = cast(ir.CppTemplateBuffer, template_node.node)
epilogue_ir_nodes: list[Optional[ir.Operation]] = [
n.node for n in epilogue_nodes
]
assert all(isinstance(n, ir.ComputedBuffer) for n in epilogue_ir_nodes), (
"Epilogue nodes must all be instances of ir.ComputedBuffer"
)
def template_buffer_has_other_users(
template_buffer, outputs_by_name, epilogue_nodes
):
if not epilogue_nodes:
return False
assert template_buffer.get_name() in outputs_by_name
users = outputs_by_name[template_buffer.get_name()].users
return not all(
isinstance(user.node, BaseSchedulerNode)
and user.node.node in epilogue_nodes
for user in users
)
flag_template_buffer_has_other_users = template_buffer_has_other_users(
ctb, template_node.outputs_by_name, epilogue_ir_nodes
)
kernel, render = ctb.make_kernel_render( # type: ignore[misc]
ctb,
flag_template_buffer_has_other_users=flag_template_buffer_has_other_users,
epilogue_nodes=epilogue_ir_nodes,
)
with kernel:
if not is_multi_outputs_template(template_node.node):
template_node.mark_run() # type: ignore[attr-defined]
for node in epilogue_nodes:
node.mark_run() # type: ignore[attr-defined]
src_code = render()
with V.set_kernel_handler(kernel):
node_schedule = [template_node, *epilogue_nodes]
kernel_name = self.define_kernel(src_code, node_schedule, kernel.args)
if is_multi_outputs_template(template_node.node):
# For multi outputs template, allocate buffers for each output after the epilogue
# codegen to which determines if the buffer has been removed.
assert len(template_node.outputs) == 1, (
"Multi outputs template should be with 1 output template buffer of MultiOutputLayout"
)
for user in template_node.outputs[0].users:
assert isinstance(user.node, ExternKernelSchedulerNode), (
"Multi outputs template should be with ExternKernelSchedulerNode"
)
assert isinstance(user.node.node, ir.MultiOutput), (
"Multi outputs template has multi users with MultiOutput"
)
user.node.mark_run()
self.codegen_comment(node_schedule, kernel_name)
kernel.call_kernel(kernel_name, ctb)
V.graph.removed_buffers |= kernel.removed_buffers
self.free_buffers_in_scheduler()
def _get_scheduled_num_args(self):
return self.kernel_group.get_num_args()
def ready_to_flush(self):
return self._ready_to_flush
def codegen_sync(self):
pass
def define_kernel(self, src_code, nodes, kernel_args=None):
wrapper = V.graph.wrapper_code
if src_code in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code]
else:
fused_name = (
get_fused_kernel_name(nodes, config.cpp.descriptive_names)
if config.cpp.descriptive_names
else ""
)
kernel_name = "_".join(["cpp", fused_name, wrapper.next_kernel_suffix()])
wrapper.src_to_kernel[src_code] = kernel_name
kernel_decl_name = kernel_name if V.graph.cpp_wrapper else "kernel"
src_code = src_code.replace(str(Placeholder.KERNEL_NAME), kernel_decl_name)
src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name)
# TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does
# not use BracesBuffer, so we have no good indicator of a C++ buffer atm.
src_code = src_code.replace("#pragma CMT", "//")
# Get the lines in the source code representing the function definition,
# excluding the first line including cpp_prefix.h.
first_char = src_code.rfind('extern "C"')
last_char = src_code.find(")", first_char)
if _IS_WINDOWS:
# get_export_declaration introduced one more ')' in Windows
last_char = src_code.find(")", last_char + 1)
kernel_definition = f"{src_code[first_char : last_char + 1]};\n"
compile_wrapper = IndentedBuffer()
args = self.kernel_group.args if kernel_args is None else kernel_args
_, _, arg_types = args.cpp_argdefs()
if not V.graph.cpp_wrapper:
compile_wrapper.writeline(
f"async_compile.cpp_pybinding({arg_types!r}, r'''"
)
compile_wrapper.splice(src_code, strip=True)
if not V.graph.cpp_wrapper:
compile_wrapper.writeline("''')")
wrapper.define_kernel(
kernel_name,
compile_wrapper.getvalue(),
gpu=False,
cpp_definition=kernel_definition,
)
return kernel_name
def flush(self):
src_code = self.kernel_group.codegen_group()
if src_code:
kernel_name = self.define_kernel(
src_code, self.kernel_group.scheduled_nodes
)
self.codegen_comment(self.kernel_group.scheduled_nodes, kernel_name)
if config.cpp.enable_kernel_profile:
V.graph.wrapper_code.write_kernel_context_guard_begin()
V.graph.wrapper_code.write_kernel_context_guard(
kernel_name,
self.kernel_group.scheduled_nodes, # type: ignore[arg-type]
)
self.kernel_group.call_kernel(V.graph.wrapper_code, kernel_name)
if config.cpp.enable_kernel_profile:
V.graph.wrapper_code.write_kernel_context_guard_end()
self.reset_kernel_group()
self._set_flush_status(False)
def codegen_comment(self, node_schedule, kernel_name=None):
# below add provenance tracing info for cpu CppKernel types
wrapper = V.graph.wrapper_code
debug_handle = set_kernel_post_grad_provenance_tracing(
node_schedule, # type: ignore[arg-type]
# pyrefly: ignore [bad-argument-type]
kernel_name,
)
wrapper.write_provenance_debug_handle(kernel_name, debug_handle)
| CppScheduling |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 8162,
"end": 10374
} | class ____(LossFunctionWrapper):
"""Computes the cosine similarity between `y_true` & `y_pred`.
Note that it is a number between -1 and 1. When it is a negative number
between -1 and 0, 0 indicates orthogonality and values closer to -1
indicate greater similarity. This makes it usable as a loss function in a
setting where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity
will be 0 regardless of the proximity between predictions and targets.
Formula:
```python
loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
```
Args:
axis: The axis along which the cosine similarity is computed
(the features axis). Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
axis=-1,
reduction="sum_over_batch_size",
name="cosine_similarity",
dtype=None,
):
super().__init__(
cosine_similarity,
name=name,
reduction=reduction,
dtype=dtype,
axis=axis,
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.Huber")
| CosineSimilarity |
python | joke2k__faker | faker/providers/job/sk_SK/__init__.py | {
"start": 41,
"end": 16519
} | class ____(JobProvider):
"""Translated from Super class"""
jobs = (
"Administrátor, umenie",
"Administrátor, štátna služba",
"Advokát",
"Advokát pre ochranné známky",
"Akademický knihovník",
"Akupunkturista",
"Analytický chemik",
"Analytik finančného rizika",
"Angličtina ako lektorka cudzieho jazyka",
"Angličtina ako učiteľka druhého jazyka",
"Animátor",
"Arborista",
"Archeológ",
"Architekt",
"Architektonický technológ",
"Archivár",
"Arteterapeut",
"Asistent politika",
"Astronóm",
"Audiologický vedec",
"Automobilový inžinier",
"Autorizovaný likvidátor strát",
"Autorizovaný účtovník",
"Autorizovaný účtovník v oblasti verejných financií",
"Bankár",
"Banský inžinier",
"Barista",
"Biochemik, klinický",
"Biomedicínsky inžinier",
"Biomedicínsky vedec",
"Bylinkár",
"Bytový manažér / referent",
"Charitatívny úradník",
"Chemický inžinier",
"Chemik, analytický",
"Chiropraktik",
"Chirurg",
"Copywriter, reklama",
"Cytogenetik",
"Daňový poradca",
"Detská sestra",
"Detský psychoterapeut",
"Diagnostický rádiograf",
"Dietológ",
"Dizajnér, fúkané sklo / vitráž",
"Dizajnér, grafik",
"Dizajnér, interiér / priestor",
"Dizajnér, keramika / hrnčiarstvo",
"Dizajnér, multimédiá",
"Dizajnér, móda / oblečenie",
"Dizajnér, nábytok",
"Dizajnér, priemyselný / produkt",
"Dizajnér, televízia / film",
"Dizajnér, textil",
"Dizajnér, výstava / výstava",
"Dizajnér, šperky",
"Docent",
"Dodávateľ",
"Dospelý poradenský pracovník",
"Dozorca",
"Dramatický terapeut",
"Dôstojník obchodného námorníctva",
"Dôstojník pre ochranu prírody",
"Dôstojník pre výcvik a vzdelávanie ozbrojených síl",
"Editor funkcie časopisu",
"Ekológ",
"Ekonóm",
"Elektroinžinier",
"Embryológ, klinický",
"Energetický inžinier",
"Energetický manažér",
"Environmentálny manažér",
"Ergonóm",
"Farebný technológ",
"Farmaceut Spoločenstva",
"Farmakológ",
"Filmový / video editor",
"Financny kontrolor",
"Finančný manažér",
"Finančný obchodník",
"Finančný plánovač",
"Finančný poradca",
"Finančný riaditeľ",
"Firemná sekretárka",
"Fotograf",
"Fytoterapeut",
"Fyzik zdravia",
"Fyzik, lekár",
"Fyziologický vedec",
"Fyziológ cvičenia",
"Fyzioterapeut",
"Fúkač skla / dizajnér",
"Genetik, molekulárny",
"Geochemik",
"Geodet minerálov",
"Geodet poistného rizika",
"Geofyzik / terénny seizmológ",
"Geológ, strojárstvo",
"Geológ",
"Geovedec",
"Grafický dizajnér",
"Grafik",
"Hasič",
"Hematológ",
"Herec",
"Herpetológ",
"Hlavný marketingový riaditeľ",
"Homeopat",
"Hotelový manažér",
"Hudobník",
"Hudobný lektor",
"Hudobný terapeut",
"Hutník",
"Hydrogeológ",
"Hydrografický geodet",
"Hydrológ",
"Hygienik práce",
"IT konzultant",
"Ilustrátor",
"Imunológ",
"Informačný úradník",
"Investičný analytik",
"Investičný bankár, funkčný",
"Investičný bankár, podnikový",
"Inšpektor / hodnotiteľ reklamácií",
"Inšpektor historických budov / referent pamiatkovej starostlivosti",
"Inšpektor plánovania a rozvoja",
"Inšpektor zdravia a bezpečnosti",
"Inžinier budov",
"Inžinier elektroniky",
"Inžinier kontroly a prístrojového vybavenia",
"Inžinier poľnohospodárstva",
"Inžinier pre automobilový priemysel",
"Inžinier výrobných systémov",
"Inžinier, baníctvo",
"Inžinier, biomedicínsky",
"Inžinier, chemický",
"Inžinier, elektronika",
"Inžinier, elektrotechnik",
"Inžinier, energia",
"Inžinier, komunikácia",
"Inžinier, letecký",
"Inžinier, materiály",
"Inžinier, pozemok",
"Inžinier, poľnohospodár",
"Inžinier, riadenie a prístrojové vybavenie",
"Inžinier, ropa",
"Inžinier, statik",
"Inžinier, stavebné služby",
"Inžinier, stavebný (zmluvný)",
"Inžinier, stavebný inžinier (poradenstvo)",
"Inžinier, technický predaj",
"Inžinier, voda",
"Inžinier, vysielanie (prevádzka)",
"Inžinier, výroba",
"Inžinier, výroba",
"Inžinier, výrobné systémy",
"Inžinier, vŕtanie",
"Inžinier, web",
"Inžinier, údržba",
"Inžinier, údržba (IT)",
"Inžiniersky geológ",
"Kameraman",
"Kariérny informačný úradník",
"Kariérny poradca",
"Kariérny poradca pre vysokoškolské vzdelávanie",
"Kartograf",
"Klinický biochemik",
"Klinický cytogenetik",
"Klinický embryológ",
"Klinický molekulárny genetik",
"Klinický psychológ",
"Klinický vedec, histokompatibilita a imunogenetika",
"Knihovník",
"Knihovník, verejný",
"Kníhkupec",
"Komerčný / rezidenčný geodet",
"Komerčný záhradník",
"Komunikačný inžinier",
"Komunitný umelecký pracovník",
"Konateľ spoločnosti",
"Kontrolór",
"Konzervátor / reštaurátor nábytku",
"Konzervátor múzea / galérie",
"Konzervátor, múzeum / galéria",
"Konzervátor, nábytok",
"Konzultant pre dôchodky",
"Konzultácia so stavebným inžinierom",
"Koordinátor dobrovoľníctva",
"Kupujúci, maloobchod",
"Kurátor",
"Kurátor múzea / galérie",
"Lektor ďalšieho vzdelávania",
"Lektor, vysokoškolské vzdelanie",
"Lektor, ďalšie vzdelávanie",
"Lekár všeobecného lekára",
"Lekár, nemocnica",
"Lekár, všeobecná prax",
"Lekárnik, komunita",
"Lekárnik, nemocnica",
"Lekársky fyzik",
"Lekársky ilustrátor",
"Lekársky obchodný zástupca",
"Lekársky sekretár",
"Lekársky technický pracovník",
"Letecký dispečer",
"Letecký inžinier",
"Letecký sprostredkovateľ",
"Lexikograf",
"Licencovaný dopravca",
"Lobista",
"Logistika / podpora / administratívny dôstojník ozbrojených síl",
"Manažér call centra",
"Manažér cestovnej kancelárie",
"Manažér divadelnej scény",
"Manažér farmy",
"Manažér fitnescentra",
"Manažér informačných systémov",
"Manažér komerčnej umeleckej galérie",
"Manažér logistiky a distribúcie",
"Manažér stravovania",
"Manažér umeleckej galérie",
"Manažér zariadení",
"Manažér zábavného parku",
"Manžérsky konzultant",
"Marketingový manažér",
"Materiálový inžinier",
"Mediálny plánovač",
"Meteorológ",
"Mikrobiológ",
"Moderátor, vysielanie",
"Morský vedec",
"Multimediálne programy",
"Módny návrhár",
"Najlepší chlapec",
"Nemocničný lekár",
"Nemocničný lekárnik",
"Neurochirurg",
"Novinár novín",
"Novinár časopisu",
"Novinár, noviny",
"Novinár, vysielanie",
"Novinár, časopis",
"Nákupca médií",
"Nákupca, priemyselný",
"Námorný architekt",
"Návrhár interiérov a priestorov",
"Návrhár nábytku",
"Návrhár výstavy",
"Návrhár šperkov",
"Návrhárka keramiky",
"Obchodník s akciami",
"Obchodník s dlhopismi",
"Obchodník s futures",
"Oceánograf",
"Ochranár, historické budovy",
"Odborník na životné prostredie",
"Odevný / textilný technológ",
"Onkológ",
"Operatívny výskumník",
"Operačný dôstojník diplomatických služieb",
"Operačný dôstojník ozbrojených síl",
"Optik, výdaj",
"Optometristu",
"Organizácia podujatia",
"Ortoptista",
"Osobný asistent",
"Osteopat",
"Očný lekár",
"Palubní sprievodcovia",
"Patent attorney",
"Patológ",
"Pedagogický psychológ",
"Pedikér",
"Personalista",
"Pilot leteckej spoločnosti",
"Plánovač dopravy",
"Plánovač reklamného účtu",
"Plánovač tlače",
"Podnikový investičný bankár",
"Podnikový pokladník",
"Poistný matematik",
"Poisťovací maklér",
"Poisťovateľ",
"Police officer",
"Poradca pre zdravie a bezpečnosť",
"Poradca pre životné prostredie",
"Poradenská pracovníčka",
"Poradenský psychológ",
"Posádka",
"Potravinársky technológ",
"Poľnohospodársky konzultant",
"Pracovník medzinárodnej pomoci / rozvoja",
"Pracovník pomoci",
"Pracovník rozvoja komunity",
"Pracovník s mládežou",
"Pracovný psychológ",
"Pracovný terapeut",
"Predajca",
"Prekladateľ",
"Prevádzkovateľ televíznej kamery",
"Prevádzkový geológ",
"Prevádzkový investičný bankár",
"Prevádzkový riaditeľ",
"Priemyselný / produktový dizajnér",
"Priemyselný kupujúci",
"Prieskumník trhu",
"Prieskumový pracovník",
"Probačný úradník",
"Producent, rádio",
"Producent, televízia / film / video",
"Production assistant, radio",
"Production assistant, television",
"Production designer, theatre/television/film",
"Production engineer",
"Production manager",
"Produktový dizajnér",
"Produktový manažér",
"Professor Emeritus",
"Programme researcher, broadcasting/film/video",
"Programmer, applications",
"Programmer, multimedia",
"Programmer, systems",
"Proofreader",
"Právnik",
"Právny tajomník",
"Prázdninový zástupca",
"Psychiatric nurse",
"Psychiatrist",
"Psychologist, clinical",
"Psychologist, counselling",
"Psychologist, educational",
"Psychologist, forensic",
"Psychologist, occupational",
"Psychologist, prison and probation services",
"Psychologist, sport and exercise",
"Psychoterapeut tanečného pohybu",
"Psychoterapeut",
"Pôda",
"Pôrodná asistentka",
"Manažér kvality",
"Radca",
"Realitný maklér",
"Redaktor, uvedenie do prevádzky",
"Redakčný asistent",
"Referent cestovného ruchu",
"Referent environmentálnej výchovy",
"Referent geografických informačných systémov",
"Referent komunitného vzdelávania",
"Referent múzejného vzdelávania",
"Referent obchodných noriem",
"Referent ochrany prírody",
"Referent odbornej prípravy a rozvoja",
"Referent odborového výskumu",
"Referent poľných pokusov",
"Referent pre núdzové plánovanie / riadenie",
"Referent pre rovnosť a rozmanitosť",
"Referent pre výstavy v múzeách / galériách",
"Referent rozvoja umenia",
"Referent technickej podpory IT",
"Referent výstavy, múzeum / galéria",
"Referent ľudských zdrojov",
"Referent školstva pre životné prostredie",
"Referent školstva, komunita",
"Referent školstva, múzeum",
"Regulátor strát, objednaný",
"Reklamný textár",
"Reklamný umelecký riaditeľ",
"Riaditeľ pre stratégiu",
"Ropný inžinier",
"Rozvojový pracovník, komunita",
"Rozvojový pracovník, medzinárodná pomoc",
"Rýchly streamer pre štátnu službu",
"Sanitka",
"Sestra pre dospelých",
"Sestra pre duševné zdravie",
"Sestra s poruchami učenia",
"Sestra, detská",
"Sestra, dospelý",
"Sestra, porucha učenia",
"Sieťový inžinier",
"Spisovateľ",
"Spolupracovník pre klinický výskum",
"Spracovateľ geofyzikálnych údajov",
"Spravodajský analytik",
"Správca",
"Správca databázy",
"Správca dedičstva",
"Správca dôchodkového systému",
"Správca lesov a lesov",
"Správca nehnuteľnosti / pozemkový agent",
"Správca poistného účtu",
"Správca polohy",
"Správca spracovania údajov",
"Správca umenia",
"Správca zákazníckeho centra",
"Správca školstva",
"Správca štátnej služby",
"Správca, charitatívne / dobrovoľnícke organizácie",
"Správca, miestna samospráva",
"Správca, vzdelávanie",
"Správca, šport",
"Stavebný geodet",
"Stavebný inžinier, poradenstvo",
"Stavebný inžinier, uzatváranie zmlúv",
"Strihač, film / video",
"Strojný inžinier",
"Strážca / strážca",
"Svetelný technik, vysielanie / film / video",
"Súdny psychológ",
"Súdny vedec",
"Súkromný učiteľ hudby",
"Tanečnica",
"Technický dôstojník ozbrojených síl",
"Technik údržby",
"Technológ pre zvieratá",
"Technológ varenia piva",
"Terapeut, dráma",
"Terapeut, hudba",
"Terapeut, záhradnícky",
"Terapeut, šport",
"Terénny seizmológ",
"Tlačový fotograf",
"Tlmočník",
"Toxikológ",
"Umelec",
"Urobiť",
"Uvádzací redaktor",
"Učiaci sa mentor",
"Učiteľ v ranom detstve",
"Učiteľ, angličtina ako cudzí jazyk",
"Učiteľ, hudba",
"Učiteľ, prvé roky / pred",
"Učiteľ, vzdelávanie dospelých",
"Učiteľ, základná škola",
"Učiteľka na základnej škole",
"Vedec dát",
"Vedec pre kvalitu vody",
"Vedec vývoja produktov / procesov",
"Vedecký pracovník lekárskeho laboratória",
"Vedúci kancelárie",
"Vedúci konferenčného centra",
"Vedúci osobnej dopravy",
"Vedúci outdoorových aktivít / vzdelávania",
"Vedúci reklamného účtu",
"Vedúci reštaurácie rýchleho občerstvenia",
"Vedúci rybej farmy",
"Vedúci skladu",
"Vedúci strediska voľného času",
"Vedúci turistického informačného centra",
"Vedúci ubytovania",
"Vedúci zdravotníckej služby",
"Vedúci úseku",
"Veterinárny chirurg",
"Video editor",
"Vizuálny obchodník",
"Vládny úradník pre sociálny výskum",
"Vodný inžinier",
"Vrtný inžinier",
"Vybavenie záhradník",
"Vybavovač poistných udalostí",
"Vysielaný novinár",
"Vysokoškolský lektor",
"Výdajný optik",
"Výkonný riaditeľ",
"Výkonný technický riaditeľ",
"Výrobný inžinier",
"Výtvarný umelec",
"Vývojár aplikácií",
"Vývojár hier",
"Vývojár počítačových hier",
"Vývojár systémov",
"Výživový poradca pre zvieratá",
"Výživový terapeut",
"Web dizajnér",
"Wellsite geológ",
"Zamestnanec imigračného úradu",
"Zdravotná sestra, duševné zdravie",
"Zdravotný návštevník",
"Zememerač / geomatik",
"Zmluvný stavebný inžinier",
"Zubár",
"Záchranár",
"Záhradnícky konzultant",
"Záhradnícky terapeut",
"Záhradník, komerčný",
"Záhradník, vybavenosť",
"Záhradný architekt",
"Záznamník bahna",
"Úradník miestnej samosprávy",
"Úradník pre rybolov",
"Účtovník, autorizované verejné financie",
"Účtovník, autorizovaný",
"Účtovník, autorizovaný certifikovaný",
"Účtovník, autorizovaný manažment",
"Účtovný technik",
"Špecialista na multimédiá",
"Špecialista na podporu zdravia",
"Špeditér",
"Šľachtiteľ rastlín / genetik",
)
def job(self) -> str:
return self.random_element(self.jobs)
| Provider |
python | eventlet__eventlet | tests/isolated/wsgi_connection_timeout.py | {
"start": 998,
"end": 1149
} | class ____:
@staticmethod
def write(s):
output_buffer.append(s.rstrip())
return len(s)
# This test might make you wince
| BufferLog |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton_addmm.py | {
"start": 231,
"end": 1085
} | class ____(TemplateConfigHeuristics):
"""
Simple mixin to handle scalars for addmm like operators (addmm, baddbmm)
"""
def get_extra_kwargs(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> dict[str, Any]:
kwargs = super().get_extra_kwargs(kernel_inputs, op_name)
assert op_name in [
"addmm",
"baddbmm",
], f"op_name={op_name} invalid for AddMMConfigMixin"
alpha = kernel_inputs.get_scalar("alpha")
beta = kernel_inputs.get_scalar("beta")
return {
**kwargs,
"epilogue_fn": addmm_epilogue(kernel_inputs.out_dtype(), alpha, beta),
"epilogue_fn_hash": str(
["addmm_epilogue", kernel_inputs.out_dtype(), alpha, beta]
),
"prefix_args": 1,
}
| AddMMConfigMixin |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 20183,
"end": 26959
} | class ____(Property):
"""Color, as RGB(A), scalable with nominal palettes or continuous gradients."""
legend = True
normed = True
def standardize(self, val: ColorSpec) -> RGBTuple | RGBATuple:
# Return color with alpha channel only if the input spec has it
# This is so that RGBA colors can override the Alpha property
if to_rgba(val) != to_rgba(val, 1):
return to_rgba(val)
else:
return to_rgb(val)
def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:
"""Convert color sequence to RGB(A) array, preserving but not adding alpha."""
def has_alpha(x):
return to_rgba(x) != to_rgba(x, 1)
if isinstance(colors, np.ndarray):
needs_alpha = colors.shape[1] == 4
else:
needs_alpha = any(has_alpha(x) for x in colors)
if needs_alpha:
return to_rgba_array(colors)
else:
return to_rgba_array(colors)[:, :3]
def infer_scale(self, arg: Any, data: Series) -> Scale:
# TODO when inferring Continuous without data, verify type
# TODO need to rethink the variable type system
# (e.g. boolean, ordered categories as Ordinal, etc)..
var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
if var_type == "boolean":
return Boolean(arg)
if isinstance(arg, (dict, list)):
return Nominal(arg)
if isinstance(arg, tuple):
if var_type == "categorical":
# TODO It seems reasonable to allow a gradient mapping for nominal
# scale but it also feels "technically" wrong. Should this infer
# Ordinal with categorical data and, if so, verify orderedness?
return Nominal(arg)
return Continuous(arg)
if callable(arg):
return Continuous(arg)
# TODO Do we accept str like "log", "pow", etc. for semantics?
if not isinstance(arg, str):
msg = " ".join([
f"A single scale argument for {self.variable} variables must be",
f"a string, dict, tuple, list, or callable, not {type(arg)}."
])
raise TypeError(msg)
if arg in QUAL_PALETTES:
return Nominal(arg)
elif var_type == "numeric":
return Continuous(arg)
# TODO implement scales for date variables and any others.
else:
return Nominal(arg)
def get_mapping(self, scale: Scale, data: Series) -> Mapping:
"""Return a function that maps from data domain to color values."""
# TODO what is best way to do this conditional?
# Should it be class-based or should classes have behavioral attributes?
if isinstance(scale, Nominal):
return self._get_nominal_mapping(scale, data)
elif isinstance(scale, Boolean):
return self._get_boolean_mapping(scale, data)
if scale.values is None:
# TODO Rethink best default continuous color gradient
mapping = color_palette("ch:", as_cmap=True)
elif isinstance(scale.values, tuple):
# TODO blend_palette will strip alpha, but we should support
# interpolation on all four channels
mapping = blend_palette(scale.values, as_cmap=True)
elif isinstance(scale.values, str):
# TODO for matplotlib colormaps this will clip extremes, which is
# different from what using the named colormap directly would do
# This may or may not be desireable.
mapping = color_palette(scale.values, as_cmap=True)
elif callable(scale.values):
mapping = scale.values
else:
scale_class = scale.__class__.__name__
msg = " ".join([
f"Scale values for {self.variable} with a {scale_class} mapping",
f"must be string, tuple, or callable; not {type(scale.values)}."
])
raise TypeError(msg)
def _mapping(x):
# Remove alpha channel so it does not override alpha property downstream
# TODO this will need to be more flexible to support RGBA tuples (see above)
invalid = ~np.isfinite(x)
out = mapping(x)[:, :3]
out[invalid] = np.nan
return out
return _mapping
def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:
levels = categorical_order(data, scale.order)
colors = self._get_values(scale, levels)
def mapping(x):
ixs = np.asarray(np.nan_to_num(x), np.intp)
use = np.isfinite(x)
out = np.full((len(ixs), colors.shape[1]), np.nan)
out[use] = np.take(colors, ixs[use], axis=0)
return out
return mapping
def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:
colors = self._get_values(scale, [True, False])
def mapping(x):
use = np.isfinite(x)
x = np.asarray(np.nan_to_num(x)).astype(bool)
out = np.full((len(x), colors.shape[1]), np.nan)
out[x & use] = colors[0]
out[~x & use] = colors[1]
return out
return mapping
def _get_values(self, scale: Scale, levels: list) -> ArrayLike:
"""Validate scale.values and identify a value for each level."""
n = len(levels)
values = scale.values
if isinstance(values, dict):
self._check_dict_entries(levels, values)
colors = [values[x] for x in levels]
elif isinstance(values, list):
colors = self._check_list_length(levels, values)
elif isinstance(values, tuple):
colors = blend_palette(values, n)
elif isinstance(values, str):
colors = color_palette(values, n)
elif values is None:
if n <= len(get_color_cycle()):
# Use current (global) default palette
colors = color_palette(n_colors=n)
else:
colors = color_palette("husl", n)
else:
scale_class = scale.__class__.__name__
msg = " ".join([
f"Scale values for {self.variable} with a {scale_class} mapping",
f"must be string, list, tuple, or dict; not {type(scale.values)}."
])
raise TypeError(msg)
return self._standardize_color_sequence(colors)
# =================================================================================== #
# Properties that can take only two states
# =================================================================================== #
| Color |
python | great-expectations__great_expectations | scripts/cleanup/cleanup_big_query.py | {
"start": 299,
"end": 2224
} | class ____(BaseSettings):
"""Environment variables for BigQuery connection.
These are injected in via CI, but when running locally, you may use your own credentials.
GOOGLE_APPLICATION_CREDENTIALS must be kept secret
"""
GE_TEST_GCP_PROJECT: str
GE_TEST_BIGQUERY_DATASET: str
GOOGLE_APPLICATION_CREDENTIALS: str
@property
def connection_string(self) -> str:
return f"bigquery://{self.GE_TEST_GCP_PROJECT}/{self.GE_TEST_BIGQUERY_DATASET}?credentials_path={self.GOOGLE_APPLICATION_CREDENTIALS}"
# Schema patterns for different test types
SCHEMA_PATTERN_TEST = "^test_[a-z]{10}$" # General SQL testing framework
SCHEMA_PATTERN_PY_VERSION = "^py3[0-9]{1,2}_i[a-f0-9]{32}$" # Python version-specific test schemas
SCHEMA_FORMAT = f"{SCHEMA_PATTERN_TEST}|{SCHEMA_PATTERN_PY_VERSION}"
def cleanup_big_query(config: BigQueryConnectionConfig) -> None:
engine = create_engine(url=config.connection_string)
with engine.connect() as conn, conn.begin():
results = conn.execute(
TextClause(
"""
SELECT 'DROP SCHEMA ' || schema_name || ' CASCADE;'
FROM INFORMATION_SCHEMA.SCHEMATA
WHERE REGEXP_CONTAINS(schema_name, :schema_format)
AND creation_time < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1 HOUR);
"""
),
{"schema_format": SCHEMA_FORMAT},
).fetchall()
if results:
to_run = TextClause("\n".join([row[0] for row in results]))
conn.execute(to_run)
logger.info(f"Cleaned up {len(results)} BigQuery schema(s)")
else:
logger.info("No BigQuery schemas to clean up!")
engine.dispose()
if __name__ == "__main__":
config = BigQueryConnectionConfig() # type: ignore[call-arg] # pydantic populates from env vars
cleanup_big_query(config)
| BigQueryConnectionConfig |
python | apache__airflow | dev/breeze/tests/test_release_date_validation.py | {
"start": 910,
"end": 2676
} | class ____:
"""Test validation of planned release date format YYYY_MM_DD[_NN]."""
@pytest.mark.parametrize(
"date_value",
[
"2025-11-16",
"2025-11-16_01",
"2025-11-16_99",
"2025-01-01",
"2024-02-29", # Leap year
"2025-12-31",
],
)
def test_valid_date_formats(self, date_value):
"""Test that valid date formats are accepted."""
# The function is a click callback, so we pass None for ctx and param
result = validate_release_date(None, None, date_value)
assert result == date_value
def test_empty_value(self):
"""Test that empty value is accepted."""
result = validate_release_date(None, None, "")
assert result == ""
@pytest.mark.parametrize(
("date_value", "error_pattern"),
[
("2025_11_16", "YYYY-MM-DD"), # Wrong separator (underscores)
("2025-11-16_1", "YYYY-MM-DD"), # Wrong NN format (needs 2 digits)
("25-11-16", "YYYY-MM-DD"), # Wrong year format (needs 4 digits)
("2025-13-16", "Invalid date"), # Invalid month (13)
("2025-11-32", "Invalid date"), # Invalid day (32)
("2025-02-30", "Invalid date"), # Invalid date (Feb 30)
("2025-00-01", "Invalid date"), # Invalid month (0)
("2025-11-00", "Invalid date"), # Invalid day (0)
],
)
def test_invalid_date_formats(self, date_value, error_pattern):
"""Test that invalid date formats are rejected."""
from click import BadParameter
with pytest.raises(BadParameter, match=error_pattern):
validate_release_date(None, None, date_value)
| TestPlannedReleaseDateValidation |
python | py-pdf__pypdf | pypdf/_crypt_providers/_base.py | {
"start": 1480,
"end": 1670
} | class ____:
def encrypt(self, data: bytes) -> bytes: # pragma: no cover
return data
def decrypt(self, data: bytes) -> bytes: # pragma: no cover
return data
| CryptBase |
python | python-markdown__markdown | scripts/griffe_extensions.py | {
"start": 1656,
"end": 4122
} | class ____(Extension):
""" Griffe extension to insert a table of processor priority in specified functions. """
def __init__(self, paths: list[str] | None = None) -> None:
super().__init__()
self.paths = paths
def linked_obj(self, value: str, path: str) -> str:
""" Wrap object name in reference link. """
return f'[`{value}`][{path}.{value}]'
def on_function_instance(self, node: ast.AST | ObjectNode, func: Function, agent: Visitor | Inspector, **kwargs: Any) -> None: # noqa: ARG002
"""Add table to specified function docstrings."""
if self.paths and func.path not in self.paths:
return # skip objects that were not selected
# Table header
data = [
'Class Instance | Name | Priority',
'-------------- | ---- | :------:'
]
# Extract table body from source code of function.
for obj in node.body:
# Extract the arguments passed to `util.Registry.register`.
if isinstance(obj, ast.Expr) and isinstance(obj.value, ast.Call) and obj.value.func.attr == 'register':
_args = obj.value.args
cls = self.linked_obj(_args[0].func.id, func.path.rsplit('.', 1)[0])
name = _args[1].value
priority = str(_args[2].value)
if func.name == ('build_inlinepatterns'):
# Include Pattern: first arg passed to class
if isinstance(_args[0].args[0], ast.Constant):
# Pattern is a string
value = f'`"{_args[0].args[0].value}"`'
else:
# Pattern is a variable
value = self.linked_obj(_args[0].args[0].id, func.path.rsplit('.', 1)[0])
cls = f'{cls}({value})'
data.append(f'{cls} | `{name}` | `{priority}`')
table = '\n'.join(data)
body = (
f"Return a [`{func.returns.canonical_name}`][{func.returns.canonical_path}] instance which contains "
"the following collection of classes with their assigned names and priorities.\n\n"
f"{table}"
)
# Add to docstring.
if not func.docstring:
func.docstring = Docstring("", parent=func)
sections = func.docstring.parsed
sections.append(DocstringSectionText(body, title="Priority Table"))
| PriorityTableExtension |
python | viewflow__viewflow | viewflow/workflow/migrations/0002_fsmchange.py | {
"start": 565,
"end": 935
} | class ____(migrations.Migration):
dependencies = [
("viewflow", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="task",
name="comments",
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.RunPython(update_status),
]
| Migration |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py | {
"start": 19562,
"end": 19634
} | class ____(Sam2VideoPromptEncoder):
pass
| Sam3TrackerVideoPromptEncoder |
python | doocs__leetcode | solution/2500-2599/2568.Minimum Impossible OR/Solution.py | {
"start": 0,
"end": 159
} | class ____:
def minImpossibleOR(self, nums: List[int]) -> int:
s = set(nums)
return next(1 << i for i in range(32) if 1 << i not in s)
| Solution |
python | getsentry__sentry | src/sentry/seer/endpoints/organization_seer_explorer_update.py | {
"start": 838,
"end": 2602
} | class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ML_AI
permission_classes = (OrganizationSeerExplorerUpdatePermission,)
def post(self, request: Request, organization: Organization, run_id: int) -> Response:
"""
Send an update event to explorer for a given run.
"""
user = request.user
if not features.has(
"organizations:gen-ai-features", organization, actor=user
) or not features.has("organizations:seer-explorer", organization, actor=user):
return Response({"detail": "Feature flag not enabled"}, status=400)
if organization.get_option("sentry:hide_ai_features"):
return Response(
{"detail": "AI features are disabled for this organization."}, status=403
)
if not get_seer_org_acknowledgement(organization):
return Response(
{"detail": "Seer has not been acknowledged by the organization."}, status=403
)
if not request.data:
return Response(status=400, data={"error": "Need a body with a payload"})
path = "/v1/automation/explorer/update"
body = orjson.dumps(
{
"run_id": run_id,
**request.data,
}
)
response = requests.post(
f"{settings.SEER_AUTOFIX_URL}{path}",
data=body,
headers={
"content-type": "application/json;charset=utf-8",
**sign_with_seer_secret(body),
},
)
response.raise_for_status()
return Response(status=202, data=response.json())
| OrganizationSeerExplorerUpdateEndpoint |
python | django__django | django/core/exceptions.py | {
"start": 109,
"end": 208
} | class ____(Exception):
"""The requested model field does not exist"""
pass
| FieldDoesNotExist |
python | django__django | tests/model_formsets_regress/tests.py | {
"start": 12563,
"end": 12803
} | class ____(forms.ModelForm):
class Meta:
model = UserSite
fields = "__all__"
widgets = {
"id": CustomWidget,
"data": CustomWidget,
}
localized_fields = ("data",)
| UserSiteForm |
python | sympy__sympy | sympy/integrals/transforms.py | {
"start": 15377,
"end": 28815
} | class ____(ValueError):
"""
Exception raised by _rewrite_gamma. Mainly for internal use.
"""
pass
def _rewrite_gamma(f, s, a, b):
"""
Try to rewrite the product f(s) as a product of gamma functions,
so that the inverse Mellin transform of f can be expressed as a meijer
G function.
Explanation
===========
Return (an, ap), (bm, bq), arg, exp, fac such that
G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s).
Raises IntegralTransformError or MellinTransformStripError on failure.
It is asserted that f has no poles in the fundamental strip designated by
(a, b). One of a and b is allowed to be None. The fundamental strip is
important, because it determines the inversion contour.
This function can handle exponentials, linear factors, trigonometric
functions.
This is a helper function for inverse_mellin_transform that will not
attempt any transformations on f.
Examples
========
>>> from sympy.integrals.transforms import _rewrite_gamma
>>> from sympy.abc import s
>>> from sympy import oo
>>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo)
(([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1)
>>> _rewrite_gamma((s-1)**2, s, -oo, oo)
(([], [1, 1]), ([2, 2], []), 1, 1, 1)
Importance of the fundamental strip:
>>> _rewrite_gamma(1/s, s, 0, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, None, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, 0, None)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, -oo, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, None, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, -oo, None)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(2**(-s+3), s, -oo, oo)
(([], []), ([], []), 1/2, 1, 8)
"""
# Our strategy will be as follows:
# 1) Guess a constant c such that the inversion integral should be
# performed wrt s'=c*s (instead of plain s). Write s for s'.
# 2) Process all factors, rewrite them independently as gamma functions in
# argument s, or exponentials of s.
# 3) Try to transform all gamma functions s.t. they have argument
# a+s or a-s.
# 4) Check that the resulting G function parameters are valid.
# 5) Combine all the exponentials.
a_, b_ = S([a, b])
def left(c, is_numer):
"""
Decide whether pole at c lies to the left of the fundamental strip.
"""
# heuristically, this is the best chance for us to solve the inequalities
c = expand(re(c))
if a_ is None and b_ is S.Infinity:
return True
if a_ is None:
return c < b_
if b_ is None:
return c <= a_
if (c >= b_) == True:
return False
if (c <= a_) == True:
return True
if is_numer:
return None
if a_.free_symbols or b_.free_symbols or c.free_symbols:
return None # XXX
#raise IntegralTransformError('Inverse Mellin', f,
# 'Could not determine position of singularity %s'
# ' relative to fundamental strip' % c)
raise MellinTransformStripError('Pole inside critical strip?')
# 1)
s_multipliers = []
for g in f.atoms(gamma):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff]
for g in f.atoms(sin, cos, tan, cot):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff/pi]
s_multipliers = [Abs(x) if x.is_extended_real else x for x in s_multipliers]
common_coefficient = S.One
for x in s_multipliers:
if not x.is_Rational:
common_coefficient = x
break
s_multipliers = [x/common_coefficient for x in s_multipliers]
if not (all(x.is_Rational for x in s_multipliers) and
common_coefficient.is_extended_real):
raise IntegralTransformError("Gamma", None, "Nonrational multiplier")
s_multiplier = common_coefficient/reduce(ilcm, [S(x.q)
for x in s_multipliers], S.One)
if s_multiplier == common_coefficient:
if len(s_multipliers) == 0:
s_multiplier = common_coefficient
else:
s_multiplier = common_coefficient \
*reduce(igcd, [S(x.p) for x in s_multipliers])
f = f.subs(s, s/s_multiplier)
fac = S.One/s_multiplier
exponent = S.One/s_multiplier
if a_ is not None:
a_ *= s_multiplier
if b_ is not None:
b_ *= s_multiplier
# 2)
numer, denom = f.as_numer_denom()
numer = Mul.make_args(numer)
denom = Mul.make_args(denom)
args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False)))
facs = []
dfacs = []
# *_gammas will contain pairs (a, c) representing Gamma(a*s + c)
numer_gammas = []
denom_gammas = []
# exponentials will contain bases for exponentials of s
exponentials = []
def exception(fact):
return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact)
while args:
fact, is_numer = args.pop()
if is_numer:
ugammas, lgammas = numer_gammas, denom_gammas
ufacs = facs
else:
ugammas, lgammas = denom_gammas, numer_gammas
ufacs = dfacs
def linear_arg(arg):
""" Test if arg is of form a*s+b, raise exception if not. """
if not arg.is_polynomial(s):
raise exception(fact)
p = Poly(arg, s)
if p.degree() != 1:
raise exception(fact)
return p.all_coeffs()
# constants
if not fact.has(s):
ufacs += [fact]
# exponentials
elif fact.is_Pow or isinstance(fact, exp):
if fact.is_Pow:
base = fact.base
exp_ = fact.exp
else:
base = exp_polar(1)
exp_ = fact.exp
if exp_.is_Integer:
cond = is_numer
if exp_ < 0:
cond = not cond
args += [(base, cond)]*Abs(exp_)
continue
elif not base.has(s):
a, b = linear_arg(exp_)
if not is_numer:
base = 1/base
exponentials += [base**a]
facs += [base**b]
else:
raise exception(fact)
# linear factors
elif fact.is_polynomial(s):
p = Poly(fact, s)
if p.degree() != 1:
# We completely factor the poly. For this we need the roots.
# Now roots() only works in some cases (low degree), and CRootOf
# only works without parameters. So try both...
coeff = p.LT()[1]
rs = roots(p, s)
if len(rs) != p.degree():
rs = CRootOf.all_roots(p)
ufacs += [coeff]
args += [(s - c, is_numer) for c in rs]
continue
a, c = p.all_coeffs()
ufacs += [a]
c /= -a
# Now need to convert s - c
if left(c, is_numer):
ugammas += [(S.One, -c + 1)]
lgammas += [(S.One, -c)]
else:
ufacs += [-1]
ugammas += [(S.NegativeOne, c + 1)]
lgammas += [(S.NegativeOne, c)]
elif isinstance(fact, gamma):
a, b = linear_arg(fact.args[0])
if is_numer:
if (a > 0 and (left(-b/a, is_numer) == False)) or \
(a < 0 and (left(-b/a, is_numer) == True)):
raise NotImplementedError(
'Gammas partially over the strip.')
ugammas += [(a, b)]
elif isinstance(fact, sin):
# We try to re-write all trigs as gammas. This is not in
# general the best strategy, since sometimes this is impossible,
# but rewriting as exponentials would work. However trig functions
# in inverse mellin transforms usually all come from simplifying
# gamma terms, so this should work.
a = fact.args[0]
if is_numer:
# No problem with the poles.
gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi
else:
gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_)
args += [(gamma1, not is_numer), (gamma2, not is_numer)]
ufacs += [fac_]
elif isinstance(fact, tan):
a = fact.args[0]
args += [(sin(a, evaluate=False), is_numer),
(sin(pi/2 - a, evaluate=False), not is_numer)]
elif isinstance(fact, cos):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer)]
elif isinstance(fact, cot):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer),
(sin(a, evaluate=False), not is_numer)]
else:
raise exception(fact)
fac *= Mul(*facs)/Mul(*dfacs)
# 3)
an, ap, bm, bq = [], [], [], []
for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True),
(denom_gammas, bq, ap, False)]:
while gammas:
a, c = gammas.pop()
if a != -1 and a != +1:
# We use the gamma function multiplication theorem.
p = Abs(S(a))
newa = a/p
newc = c/p
if not a.is_Integer:
raise TypeError("a is not an integer")
for k in range(p):
gammas += [(newa, newc + k/p)]
if is_numer:
fac *= (2*pi)**((1 - p)/2) * p**(c - S.Half)
exponentials += [p**a]
else:
fac /= (2*pi)**((1 - p)/2) * p**(c - S.Half)
exponentials += [p**(-a)]
continue
if a == +1:
plus.append(1 - c)
else:
minus.append(c)
# 4)
# TODO
# 5)
arg = Mul(*exponentials)
# for testability, sort the arguments
an.sort(key=default_sort_key)
ap.sort(key=default_sort_key)
bm.sort(key=default_sort_key)
bq.sort(key=default_sort_key)
return (an, ap), (bm, bq), arg, exponent, fac
@_noconds_(True)
def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False):
""" A helper for the real inverse_mellin_transform function, this one here
assumes x to be real and positive. """
x = _dummy('t', 'inverse-mellin-transform', F, positive=True)
# Actually, we won't try integration at all. Instead we use the definition
# of the Meijer G function as a fairly general inverse mellin transform.
F = F.rewrite(gamma)
for g in [factor(F), expand_mul(F), expand(F)]:
if g.is_Add:
# do all terms separately
ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg,
noconds=False)
for G in g.args]
conds = [p[1] for p in ress]
ress = [p[0] for p in ress]
res = Add(*ress)
if not as_meijerg:
res = factor(res, gens=res.atoms(Heaviside))
return res.subs(x, x_), And(*conds)
try:
a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1])
except IntegralTransformError:
continue
try:
G = meijerg(a, b, C/x**e)
except ValueError:
continue
if as_meijerg:
h = G
else:
try:
from sympy.simplify import hyperexpand
h = hyperexpand(G)
except NotImplementedError:
raise IntegralTransformError(
'Inverse Mellin', F, 'Could not calculate integral')
if h.is_Piecewise and len(h.args) == 3:
# XXX we break modularity here!
h = Heaviside(x - Abs(C))*h.args[0].args[0] \
+ Heaviside(Abs(C) - x)*h.args[1].args[0]
# We must ensure that the integral along the line we want converges,
# and return that value.
# See [L], 5.2
cond = [Abs(arg(G.argument)) < G.delta*pi]
# Note: we allow ">=" here, this corresponds to convergence if we let
# limits go to oo symmetrically. ">" corresponds to absolute convergence.
cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1),
Abs(arg(G.argument)) == G.delta*pi)]
cond = Or(*cond)
if cond == False:
raise IntegralTransformError(
'Inverse Mellin', F, 'does not converge')
return (h*fac).subs(x, x_), cond
raise IntegralTransformError('Inverse Mellin', F, '')
_allowed = None
| MellinTransformStripError |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 72382,
"end": 77026
} | class ____(SchemaType, Emulated, TypeEngine[bool]):
"""A bool datatype.
:class:`.Boolean` typically uses BOOLEAN or SMALLINT on the DDL side,
and on the Python side deals in ``True`` or ``False``.
The :class:`.Boolean` datatype currently has two levels of assertion
that the values persisted are simple true/false values. For all
backends, only the Python values ``None``, ``True``, ``False``, ``1``
or ``0`` are accepted as parameter values. For those backends that
don't support a "native boolean" datatype, an option exists to
also create a CHECK constraint on the target column
"""
__visit_name__ = "boolean"
native = True
operator_classes = OperatorClass.BOOLEAN
def __init__(
self,
create_constraint: bool = False,
name: Optional[str] = None,
_create_events: bool = True,
_adapted_from: Optional[SchemaType] = None,
):
"""Construct a Boolean.
:param create_constraint: defaults to False. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
.. note:: it is strongly recommended that the CHECK constraint
have an explicit name in order to support schema-management
concerns. This can be established either by setting the
:paramref:`.Boolean.name` parameter or by setting up an
appropriate naming convention; see
:ref:`constraint_naming_conventions` for background.
.. versionchanged:: 1.4 - this flag now defaults to False, meaning
no CHECK constraint is generated for a non-native enumerated
type.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
"""
self.create_constraint = create_constraint
self.name = name
self._create_events = _create_events
if _adapted_from:
self.dispatch = self.dispatch._join(_adapted_from.dispatch)
def copy(self, **kw):
# override SchemaType.copy() to not include to_metadata logic
return self.adapt(
cast("Type[TypeEngine[Any]]", self.__class__),
_create_events=True,
)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not compiler.dialect.supports_native_boolean
and compiler.dialect.non_native_boolean_check_constraint
)
@util.preload_module("sqlalchemy.sql.schema")
def _set_table(self, column, table):
schema = util.preloaded.sql_schema
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, self).in_([0, 1]),
name=_NONE_NAME if self.name is None else self.name,
_create_rule=functools.partial(
self._should_create_constraint,
variant_mapping=variant_mapping,
),
_type_bound=True,
)
assert e.table is table
@property
def python_type(self):
return bool
_strict_bools = frozenset([None, True, False])
def _strict_as_bool(self, value):
if value not in self._strict_bools:
if not isinstance(value, int):
raise TypeError("Not a boolean value: %r" % (value,))
else:
raise ValueError(
"Value %r is not None, True, or False" % (value,)
)
return value
def literal_processor(self, dialect):
compiler = dialect.statement_compiler(dialect, None)
true = compiler.visit_true(None)
false = compiler.visit_false(None)
def process(value):
return true if self._strict_as_bool(value) else false
return process
def bind_processor(self, dialect):
_strict_as_bool = self._strict_as_bool
_coerce: Union[Type[bool], Type[int]]
if dialect.supports_native_boolean:
_coerce = bool
else:
_coerce = int
def process(value):
value = _strict_as_bool(value)
if value is not None:
value = _coerce(value)
return value
return process
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
| Boolean |
python | run-llama__llama_index | llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-postgres/llama_index/storage/kvstore/postgres/base.py | {
"start": 1827,
"end": 14932
} | class ____(BaseKVStore):
"""
Postgres Key-Value store.
Args:
connection_string (str): psycopg2 connection string
async_connection_string (str): asyncpg connection string
table_name (str): table name
schema_name (Optional[str]): schema name
perform_setup (Optional[bool]): perform table setup
debug (Optional[bool]): debug mode
use_jsonb (Optional[bool]): use JSONB data type for storage
"""
connection_string: Optional[str]
async_connection_string: Optional[str]
table_name: str
schema_name: str
perform_setup: bool
debug: bool
use_jsonb: bool
_engine: Optional[sqlalchemy.engine.Engine] = PrivateAttr()
_async_engine: Optional[sqlalchemy.ext.asyncio.AsyncEngine] = PrivateAttr()
def __init__(
self,
table_name: str,
connection_string: Optional[str] = None,
async_connection_string: Optional[str] = None,
schema_name: str = "public",
engine: Optional[sqlalchemy.engine.Engine] = None,
async_engine: Optional[sqlalchemy.ext.asyncio.AsyncEngine] = None,
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> None:
try:
import asyncpg # noqa
import psycopg2 # noqa
except ImportError:
raise ImportError(
"`psycopg2-binary` and `asyncpg` packages should be pre installed"
)
table_name = table_name.lower()
schema_name = schema_name.lower()
self.connection_string = connection_string
self.async_connection_string = async_connection_string
self.table_name = table_name
self.schema_name = schema_name
self.perform_setup = perform_setup
self.debug = debug
self.use_jsonb = use_jsonb
self._engine = engine
self._async_engine = async_engine
self._is_initialized = False
if not self._async_engine and not self.async_connection_string:
raise ValueError(
"You should provide an asynchronous connection string, if you do not provide an asynchronous SqlAlchemy engine"
)
elif not self._engine and not self.connection_string:
raise ValueError(
"You should provide a synchronous connection string, if you do not provide a synchronous SqlAlchemy engine"
)
elif (
not self._engine
and not self._async_engine
and (not self.connection_string or not self.connection_string)
):
raise ValueError(
"If a SqlAlchemy engine is not provided, you should provide a synchronous and an asynchronous connection string"
)
from sqlalchemy.orm import declarative_base
# sqlalchemy model
self._base = declarative_base()
self._table_class = get_data_model(
self._base,
table_name,
schema_name,
use_jsonb=use_jsonb,
)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
table_name: str = "kvstore",
schema_name: str = "public",
connection_string: Optional[str] = None,
async_connection_string: Optional[str] = None,
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> "PostgresKVStore":
"""Return connection string from database parameters."""
conn_str = (
connection_string
or f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}"
)
async_conn_str = async_connection_string or (
f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{database}"
)
return cls(
connection_string=conn_str,
async_connection_string=async_conn_str,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
@classmethod
def from_uri(
cls,
uri: str,
table_name: str = "kvstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
) -> "PostgresKVStore":
"""Return connection string from database parameters."""
params = params_from_uri(uri)
return cls.from_params(
**params,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
def _connect(self) -> Any:
from sqlalchemy import create_engine
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
self._engine = self._engine or create_engine(
self.connection_string, echo=self.debug
)
self._session = sessionmaker(self._engine)
self._async_engine = self._async_engine or create_async_engine(
self.async_connection_string
)
self._async_session = sessionmaker(self._async_engine, class_=AsyncSession)
def _create_schema_if_not_exists(self) -> None:
with self._session() as session, session.begin():
inspector = inspect(session.connection())
existing_schemas = inspector.get_schema_names()
if self.schema_name not in existing_schemas:
session.execute(CreateSchema(self.schema_name))
def _create_tables_if_not_exists(self) -> None:
with self._session() as session, session.begin():
self._base.metadata.create_all(session.connection())
def _initialize(self) -> None:
if not self._is_initialized:
self._connect()
if self.perform_setup:
self._create_schema_if_not_exists()
self._create_tables_if_not_exists()
self._is_initialized = True
def put(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
self.put_all([(key, val)], collection=collection)
async def aput(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
await self.aput_all([(key, val)], collection=collection)
def put_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
from sqlalchemy.dialects.postgresql import insert
self._initialize()
with self._session() as session:
for i in range(0, len(kv_pairs), batch_size):
batch = kv_pairs[i : i + batch_size]
values_to_insert = [
{
"key": key,
"namespace": collection,
"value": value,
}
for key, value in batch
]
stmt = insert(self._table_class).values(values_to_insert)
stmt = stmt.on_conflict_do_update(
index_elements=["key", "namespace"],
set_={"value": stmt.excluded.value},
)
session.execute(stmt)
session.commit()
async def aput_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
from sqlalchemy.dialects.postgresql import insert
self._initialize()
async with self._async_session() as session:
for i in range(0, len(kv_pairs), batch_size):
batch = kv_pairs[i : i + batch_size]
values_to_insert = [
{
"key": key,
"namespace": collection,
"value": value,
}
for key, value in batch
]
stmt = insert(self._table_class).values(values_to_insert)
stmt = stmt.on_conflict_do_update(
index_elements=["key", "namespace"],
set_={"value": stmt.excluded.value},
)
await session.execute(stmt)
await session.commit()
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
from sqlalchemy import select
self._initialize()
with self._session() as session:
result = session.execute(
select(self._table_class)
.filter_by(key=key)
.filter_by(namespace=collection)
)
result = result.scalars().first()
if result:
return result.value
return None
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
from sqlalchemy import select
self._initialize()
async with self._async_session() as session:
result = await session.execute(
select(self._table_class)
.filter_by(key=key)
.filter_by(namespace=collection)
)
result = result.scalars().first()
if result:
return result.value
return None
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
from sqlalchemy import select
self._initialize()
with self._session() as session:
results = session.execute(
select(self._table_class).filter_by(namespace=collection)
)
results = results.scalars().all()
return {result.key: result.value for result in results} if results else {}
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
from sqlalchemy import select
self._initialize()
async with self._async_session() as session:
results = await session.execute(
select(self._table_class).filter_by(namespace=collection)
)
results = results.scalars().all()
return {result.key: result.value for result in results} if results else {}
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
from sqlalchemy import delete
self._initialize()
with self._session() as session:
result = session.execute(
delete(self._table_class)
.filter_by(namespace=collection)
.filter_by(key=key)
)
session.commit()
return result.rowcount > 0
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
from sqlalchemy import delete
self._initialize()
async with self._async_session() as session:
async with session.begin():
result = await session.execute(
delete(self._table_class)
.filter_by(namespace=collection)
.filter_by(key=key)
)
return result.rowcount > 0
def params_from_uri(uri: str) -> dict:
result = urlparse(uri)
database = result.path[1:]
port = result.port if result.port else 5432
return {
"database": database,
"user": result.username,
"password": result.password,
"host": result.hostname,
"port": port,
}
| PostgresKVStore |
python | django-debug-toolbar__django-debug-toolbar | tests/test_utils.py | {
"start": 1852,
"end": 3916
} | class ____(unittest.TestCase):
@override_settings(DEBUG_TOOLBAR_CONFIG={"HIDE_IN_STACKTRACES": []})
def test_get_stack_trace_skip(self):
stack_trace = get_stack_trace(skip=-1)
self.assertTrue(len(stack_trace) > 2)
self.assertEqual(stack_trace[-1][0], debug_toolbar.utils.__file__)
self.assertEqual(stack_trace[-1][2], "get_stack_trace")
self.assertEqual(stack_trace[-2][0], __file__)
self.assertEqual(stack_trace[-2][2], "test_get_stack_trace_skip")
stack_trace = get_stack_trace()
self.assertTrue(len(stack_trace) > 1)
self.assertEqual(stack_trace[-1][0], __file__)
self.assertEqual(stack_trace[-1][2], "test_get_stack_trace_skip")
def test_deprecated_functions(self):
with self.assertWarns(DeprecationWarning):
stack = get_stack()
self.assertEqual(stack[0][1], __file__)
with self.assertWarns(DeprecationWarning):
stack_trace = tidy_stacktrace(reversed(stack))
self.assertEqual(stack_trace[-1][0], __file__)
@override_settings(DEBUG_TOOLBAR_CONFIG={"ENABLE_STACKTRACES_LOCALS": True})
def test_locals(self):
# This wrapper class is necessary to mask the repr() of the list
# returned by get_stack_trace(); otherwise the 'test_locals_value_1'
# string will also be present in rendered_stack_2.
class HideRepr:
def __init__(self, value):
self.value = value
x = "test_locals_value_1"
stack_1_wrapper = HideRepr(get_stack_trace())
x = x.replace("1", "2")
stack_2_wrapper = HideRepr(get_stack_trace())
rendered_stack_1 = render_stacktrace(stack_1_wrapper.value)
self.assertIn("test_locals_value_1", rendered_stack_1)
self.assertNotIn("test_locals_value_2", rendered_stack_1)
rendered_stack_2 = render_stacktrace(stack_2_wrapper.value)
self.assertNotIn("test_locals_value_1", rendered_stack_2)
self.assertIn("test_locals_value_2", rendered_stack_2)
| StackTraceTestCase |
python | pypa__pip | src/pip/_vendor/rich/color.py | {
"start": 6436,
"end": 18209
} | class ____(NamedTuple):
"""Terminal color definition."""
name: str
"""The name of the color (typically the input to Color.parse)."""
type: ColorType
"""The type of the color."""
number: Optional[int] = None
"""The color number, if a standard color, or None."""
triplet: Optional[ColorTriplet] = None
"""A triplet of color components, if an RGB color."""
def __rich__(self) -> "Text":
"""Displays the actual color if Rich printed."""
from .style import Style
from .text import Text
return Text.assemble(
f"<color {self.name!r} ({self.type.name.lower()})",
("⬤", Style(color=self)),
" >",
)
def __rich_repr__(self) -> Result:
yield self.name
yield self.type
yield "number", self.number, None
yield "triplet", self.triplet, None
@property
def system(self) -> ColorSystem:
"""Get the native color system for this color."""
if self.type == ColorType.DEFAULT:
return ColorSystem.STANDARD
return ColorSystem(int(self.type))
@property
def is_system_defined(self) -> bool:
"""Check if the color is ultimately defined by the system."""
return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR)
@property
def is_default(self) -> bool:
"""Check if the color is a default color."""
return self.type == ColorType.DEFAULT
def get_truecolor(
self, theme: Optional["TerminalTheme"] = None, foreground: bool = True
) -> ColorTriplet:
"""Get an equivalent color triplet for this color.
Args:
theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None.
foreground (bool, optional): True for a foreground color, or False for background. Defaults to True.
Returns:
ColorTriplet: A color triplet containing RGB components.
"""
if theme is None:
theme = DEFAULT_TERMINAL_THEME
if self.type == ColorType.TRUECOLOR:
assert self.triplet is not None
return self.triplet
elif self.type == ColorType.EIGHT_BIT:
assert self.number is not None
return EIGHT_BIT_PALETTE[self.number]
elif self.type == ColorType.STANDARD:
assert self.number is not None
return theme.ansi_colors[self.number]
elif self.type == ColorType.WINDOWS:
assert self.number is not None
return WINDOWS_PALETTE[self.number]
else: # self.type == ColorType.DEFAULT:
assert self.number is None
return theme.foreground_color if foreground else theme.background_color
@classmethod
def from_ansi(cls, number: int) -> "Color":
"""Create a Color number from it's 8-bit ansi number.
Args:
number (int): A number between 0-255 inclusive.
Returns:
Color: A new Color instance.
"""
return cls(
name=f"color({number})",
type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
number=number,
)
@classmethod
def from_triplet(cls, triplet: "ColorTriplet") -> "Color":
"""Create a truecolor RGB color from a triplet of values.
Args:
triplet (ColorTriplet): A color triplet containing red, green and blue components.
Returns:
Color: A new color object.
"""
return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet)
@classmethod
def from_rgb(cls, red: float, green: float, blue: float) -> "Color":
"""Create a truecolor from three color components in the range(0->255).
Args:
red (float): Red component in range 0-255.
green (float): Green component in range 0-255.
blue (float): Blue component in range 0-255.
Returns:
Color: A new color object.
"""
return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue)))
@classmethod
def default(cls) -> "Color":
"""Get a Color instance representing the default color.
Returns:
Color: Default color.
"""
return cls(name="default", type=ColorType.DEFAULT)
    @classmethod
    @lru_cache(maxsize=1024)
    def parse(cls, color: str) -> "Color":
        """Parse a color definition.

        Accepts "default", a named ANSI color, or the syntaxes matched by
        RE_COLOR (hex triplet, color(<number>), rgb(<r>,<g>,<b>)).
        Results are memoized via lru_cache.

        Raises:
            ColorParseError: If *color* cannot be parsed.
        """
        original_color = color
        color = color.lower().strip()

        if color == "default":
            return cls(color, type=ColorType.DEFAULT)

        # Named ANSI color, e.g. "red" or "bright_magenta".
        color_number = ANSI_COLOR_NAMES.get(color)
        if color_number is not None:
            return cls(
                color,
                type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT),
                number=color_number,
            )

        color_match = RE_COLOR.match(color)
        if color_match is None:
            raise ColorParseError(f"{original_color!r} is not a valid color")

        # Exactly one of the three groups is populated by the regex.
        color_24, color_8, color_rgb = color_match.groups()
        if color_24:
            # Six hex digits -> truecolor.
            triplet = ColorTriplet(
                int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16)
            )
            return cls(color, ColorType.TRUECOLOR, triplet=triplet)

        elif color_8:
            # color(<number>) -> standard or 8-bit palette entry.
            number = int(color_8)
            if number > 255:
                raise ColorParseError(f"color number must be <= 255 in {color!r}")
            return cls(
                color,
                type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
                number=number,
            )

        else:  # color_rgb:
            # rgb(<r>,<g>,<b>) -> truecolor, with component validation.
            components = color_rgb.split(",")
            if len(components) != 3:
                raise ColorParseError(
                    f"expected three components in {original_color!r}"
                )
            red, green, blue = components
            triplet = ColorTriplet(int(red), int(green), int(blue))
            if not all(component <= 255 for component in triplet):
                raise ColorParseError(
                    f"color components must be <= 255 in {original_color!r}"
                )
            return cls(color, ColorType.TRUECOLOR, triplet=triplet)
    @lru_cache(maxsize=1024)
    def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]:
        """Get the ANSI escape codes for this color.

        Args:
            foreground (bool, optional): True for foreground codes, False for
                background codes. Defaults to True.

        Returns:
            Tuple[str, ...]: SGR parameter strings (without the escape prefix).
        """
        _type = self.type
        if _type == ColorType.DEFAULT:
            # SGR 39 / 49 reset to the terminal's default fore/background.
            return ("39" if foreground else "49",)

        elif _type == ColorType.WINDOWS:
            number = self.number
            assert number is not None
            # 30-37 / 40-47 for the dim colors; adding 82 / 92 yields the
            # bright range 90-97 / 100-107.
            fore, back = (30, 40) if number < 8 else (82, 92)
            return (str(fore + number if foreground else back + number),)

        elif _type == ColorType.STANDARD:
            number = self.number
            assert number is not None
            fore, back = (30, 40) if number < 8 else (82, 92)
            return (str(fore + number if foreground else back + number),)

        elif _type == ColorType.EIGHT_BIT:
            assert self.number is not None
            # SGR 38;5;<n> / 48;5;<n> selects from the 256-color palette.
            return ("38" if foreground else "48", "5", str(self.number))

        else:  # self.standard == ColorStandard.TRUECOLOR:
            assert self.triplet is not None
            red, green, blue = self.triplet
            # SGR 38;2;r;g;b / 48;2;r;g;b selects a 24-bit color.
            return ("38" if foreground else "48", "2", str(red), str(green), str(blue))
    @lru_cache(maxsize=1024)
    def downgrade(self, system: ColorSystem) -> "Color":
        """Downgrade a color system to a system with fewer colors."""
        # Nothing to do for default colors or when already in the target
        # system.
        if self.type in (ColorType.DEFAULT, system):
            return self
        # Convert to 8-bit color from truecolor color
        if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR:
            assert self.triplet is not None
            _h, l, s = rgb_to_hls(*self.triplet.normalized)
            # If saturation is under 15% assume it is grayscale
            if s < 0.15:
                gray = round(l * 25.0)
                if gray == 0:
                    color_number = 16
                elif gray == 25:
                    color_number = 231
                else:
                    # 232-255 is the 24-step grayscale ramp, so gray in
                    # 1..24 maps to 231 + gray.
                    color_number = 231 + gray
                return Color(self.name, ColorType.EIGHT_BIT, number=color_number)

            red, green, blue = self.triplet
            # Map each channel on to the 6x6x6 color cube (palette entries
            # 16-231). The cube levels are 0, 95, 135, 175, 215, 255: the
            # first step spans 95, subsequent steps are 40 apart.
            six_red = red / 95 if red < 95 else 1 + (red - 95) / 40
            six_green = green / 95 if green < 95 else 1 + (green - 95) / 40
            six_blue = blue / 95 if blue < 95 else 1 + (blue - 95) / 40

            color_number = (
                16 + 36 * round(six_red) + 6 * round(six_green) + round(six_blue)
            )
            return Color(self.name, ColorType.EIGHT_BIT, number=color_number)

        # Convert to standard from truecolor or 8-bit
        elif system == ColorSystem.STANDARD:
            if self.system == ColorSystem.TRUECOLOR:
                assert self.triplet is not None
                triplet = self.triplet
            else:  # self.system == ColorSystem.EIGHT_BIT
                assert self.number is not None
                triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])

            # Pick the perceptually closest of the 16 standard colors.
            color_number = STANDARD_PALETTE.match(triplet)
            return Color(self.name, ColorType.STANDARD, number=color_number)

        elif system == ColorSystem.WINDOWS:
            if self.system == ColorSystem.TRUECOLOR:
                assert self.triplet is not None
                triplet = self.triplet
            else:  # self.system == ColorSystem.EIGHT_BIT
                assert self.number is not None
                # The first 16 8-bit palette entries map directly on to
                # Windows console colors.
                if self.number < 16:
                    return Color(self.name, ColorType.WINDOWS, number=self.number)
                triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])

            color_number = WINDOWS_PALETTE.match(triplet)
            return Color(self.name, ColorType.WINDOWS, number=color_number)

        return self
def parse_rgb_hex(hex_color: str) -> ColorTriplet:
    """Parse six hex characters in to RGB triplet."""
    assert len(hex_color) == 6, "must be 6 characters"
    # Decode each pair of hex digits (offsets 0, 2, 4) as one channel.
    red, green, blue = (int(hex_color[offset : offset + 2], 16) for offset in (0, 2, 4))
    return ColorTriplet(red, green, blue)
def blend_rgb(
    color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5
) -> ColorTriplet:
    """Blend one RGB color in to another."""
    # Linear interpolation per channel: c1 + (c2 - c1) * cross_fade,
    # truncated to an int exactly as the original arithmetic does.
    blended_channels = (
        int(channel1 + (channel2 - channel1) * cross_fade)
        for channel1, channel2 in zip(color1, color2)
    )
    return ColorTriplet(*blended_channels)
if __name__ == "__main__": # pragma: no cover
from .console import Console
from .table import Table
from .text import Text
console = Console()
table = Table(show_footer=False, show_edge=True)
table.add_column("Color", width=10, overflow="ellipsis")
table.add_column("Number", justify="right", style="yellow")
table.add_column("Name", style="green")
table.add_column("Hex", style="blue")
table.add_column("RGB", style="magenta")
colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items())
for color_number, name in colors:
if "grey" in name:
continue
color_cell = Text(" " * 10, style=f"on {name}")
if color_number < 16:
table.add_row(color_cell, f"{color_number}", Text(f'"{name}"'))
else:
color = EIGHT_BIT_PALETTE[color_number] # type: ignore[has-type]
table.add_row(
color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb
)
console.print(table)
| Color |
python | matplotlib__matplotlib | lib/matplotlib/lines.py | {
"start": 49680,
"end": 55210
class ____(Line2D):
    """
    A helper class that implements `~.Axes.axline`, by recomputing the artist
    transform at draw time.
    """

    def __init__(self, xy1, xy2, slope, **kwargs):
        """
        Parameters
        ----------
        xy1 : (float, float)
            The first set of (x, y) coordinates for the line to pass through.
        xy2 : (float, float) or None
            The second set of (x, y) coordinates for the line to pass through.
            Both *xy2* and *slope* must be passed, but one of them must be None.
        slope : float or None
            The slope of the line. Both *xy2* and *slope* must be passed, but one of
            them must be None.
        """
        # Placeholder data; the real endpoints are computed in
        # get_transform() at draw time.
        super().__init__([0, 1], [0, 1], **kwargs)

        # The line must be specified by exactly one of (second point, slope).
        if (xy2 is None and slope is None or
                xy2 is not None and slope is not None):
            raise TypeError(
                "Exactly one of 'xy2' and 'slope' must be given")

        self._slope = slope
        self._xy1 = xy1
        self._xy2 = xy2

    def get_transform(self):
        ax = self.axes
        # Map the stored points into scaled (pre-data) coordinates.
        points_transform = self._transform - ax.transData + ax.transScale

        if self._xy2 is not None:
            # two points were given
            (x1, y1), (x2, y2) = \
                points_transform.transform([self._xy1, self._xy2])
            dx = x2 - x1
            dy = y2 - y1
            if dx == 0:
                if dy == 0:
                    raise ValueError(
                        f"Cannot draw a line through two identical points "
                        f"(x={(x1, x2)}, y={(y1, y2)})")
                # Vertical line.
                slope = np.inf
            else:
                slope = dy / dx
        else:
            # one point and a slope were given
            x1, y1 = points_transform.transform(self._xy1)
            slope = self._slope
        # View limits in scaled coordinates.
        (vxlo, vylo), (vxhi, vyhi) = ax.transScale.transform(ax.viewLim)
        # General case: find intersections with view limits in either
        # direction, and draw between the middle two points.
        if slope == 0:
            start = vxlo, y1
            stop = vxhi, y1
        elif np.isinf(slope):
            start = x1, vylo
            stop = x1, vyhi
        else:
            # Of the four boundary intersections, the middle two (after
            # sorting) bound the visible segment.
            _, start, stop, _ = sorted([
                (vxlo, y1 + (vxlo - x1) * slope),
                (vxhi, y1 + (vxhi - x1) * slope),
                (x1 + (vylo - y1) / slope, vylo),
                (x1 + (vyhi - y1) / slope, vyhi),
            ])
        return (BboxTransformTo(Bbox([start, stop]))
                + ax.transLimits + ax.transAxes)

    def draw(self, renderer):
        self._transformed_path = None  # Force regen.
        super().draw(renderer)

    def get_xy1(self):
        """Return the *xy1* value of the line."""
        return self._xy1

    def get_xy2(self):
        """Return the *xy2* value of the line."""
        return self._xy2

    def get_slope(self):
        """Return the *slope* value of the line."""
        return self._slope

    def set_xy1(self, *args, **kwargs):
        """
        Set the *xy1* value of the line.

        Parameters
        ----------
        xy1 : tuple[float, float]
            Points for the line to pass through.
        """
        # Accept either set_xy1(x, y) (deprecated) or set_xy1((x, y)).
        params = _api.select_matching_signature([
            lambda self, x, y: locals(), lambda self, xy1: locals(),
        ], self, *args, **kwargs)
        if "x" in params:
            _api.warn_deprecated("3.10", message=(
                "Passing x and y separately to AxLine.set_xy1 is deprecated since "
                "%(since)s; pass them as a single tuple instead."))
            xy1 = params["x"], params["y"]
        else:
            xy1 = params["xy1"]
        self._xy1 = xy1

    def set_xy2(self, *args, **kwargs):
        """
        Set the *xy2* value of the line.

        .. note::

            You can only set *xy2* if the line was created using the *xy2*
            parameter. If the line was created using *slope*, please use
            `~.AxLine.set_slope`.

        Parameters
        ----------
        xy2 : tuple[float, float]
            Points for the line to pass through.
        """
        if self._slope is None:
            # Accept either set_xy2(x, y) (deprecated) or set_xy2((x, y)).
            params = _api.select_matching_signature([
                lambda self, x, y: locals(), lambda self, xy2: locals(),
            ], self, *args, **kwargs)
            if "x" in params:
                _api.warn_deprecated("3.10", message=(
                    "Passing x and y separately to AxLine.set_xy2 is deprecated since "
                    "%(since)s; pass them as a single tuple instead."))
                xy2 = params["x"], params["y"]
            else:
                xy2 = params["xy2"]
            self._xy2 = xy2
        else:
            raise ValueError("Cannot set an 'xy2' value while 'slope' is set;"
                             " they differ but their functionalities overlap")

    def set_slope(self, slope):
        """
        Set the *slope* value of the line.

        .. note::

            You can only set *slope* if the line was created using the *slope*
            parameter. If the line was created using *xy2*, please use
            `~.AxLine.set_xy2`.

        Parameters
        ----------
        slope : float
            The slope of the line.
        """
        if self._xy2 is None:
            self._slope = slope
        else:
            raise ValueError("Cannot set a 'slope' value while 'xy2' is set;"
                             " they differ but their functionalities overlap")
| AxLine |
python | scikit-image__scikit-image | src/skimage/feature/_fisher_vector.py | {
"start": 1201,
"end": 10511
class ____(FisherVectorException):
    """Raised when the descriptors passed to the Fisher vector functions
    are malformed (wrong container type, rank, or dimensionality)."""

    pass
def learn_gmm(descriptors, *, n_modes=32, gm_args=None):
    """Estimate a Gaussian mixture model (GMM) given a set of descriptors and
    number of modes (i.e. Gaussians). This function is essentially a wrapper
    around the scikit-learn implementation of GMM, namely the
    :class:`sklearn.mixture.GaussianMixture` class.

    Due to the nature of the Fisher vector, the only enforced parameter of the
    underlying scikit-learn class is the covariance_type, which must be 'diag'.

    There is no simple way to know what value to use for `n_modes` a-priori.
    Typically, the value is usually one of ``{16, 32, 64, 128}``. One may train
    a few GMMs and choose the one that maximises the log probability of the
    GMM, or choose `n_modes` such that the downstream classifier trained on
    the resultant Fisher vectors has maximal performance.

    Parameters
    ----------
    descriptors : np.ndarray (N, M) or list [(N1, M), (N2, M), ...]
        List of NumPy arrays, or a single NumPy array, of the descriptors
        used to estimate the GMM. The reason a list of NumPy arrays is
        permissible is because often when using a Fisher vector encoding,
        descriptors/vectors are computed separately for each sample/image in
        the dataset, such as SIFT vectors for each image. If a list if passed
        in, then each element must be a NumPy array in which the number of
        rows may differ (e.g. different number of SIFT vector for each image),
        but the number of columns for each must be the same (i.e. the
        dimensionality must be the same).
    n_modes : int
        The number of modes/Gaussians to estimate during the GMM estimate.
    gm_args : dict
        Keyword arguments that can be passed into the underlying scikit-learn
        :class:`sklearn.mixture.GaussianMixture` class.

    Returns
    -------
    gmm : :class:`sklearn.mixture.GaussianMixture`
        The estimated GMM object, which contains the necessary parameters
        needed to compute the Fisher vector.

    Raises
    ------
    ImportError
        If scikit-learn is not installed.
    DescriptorException
        If the descriptors are not a NumPy array or list of rank-2 NumPy
        arrays of equal dimensionality.
    FisherVectorException
        If n_modes is not a positive integer, or gm_args requests a
        covariance_type other than 'diag'.

    References
    ----------
    .. [1] https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html

    Examples
    --------
    .. testsetup::
        >>> import pytest; _ = pytest.importorskip('sklearn')

    >>> from skimage.feature import fisher_vector
    >>> rng = np.random.Generator(np.random.PCG64())
    >>> sift_for_images = [rng.standard_normal((10, 128)) for _ in range(10)]
    >>> num_modes = 16
    >>> # Estimate 16-mode GMM with these synthetic SIFT vectors
    >>> gmm = learn_gmm(sift_for_images, n_modes=num_modes)
    """
    try:
        from sklearn.mixture import GaussianMixture
    except ImportError:
        raise ImportError(
            'scikit-learn is not installed. Please ensure it is installed in '
            'order to use the Fisher vector functionality.'
        )

    if not isinstance(descriptors, (list, np.ndarray)):
        raise DescriptorException(
            'Please ensure descriptors are either a NumPy array, '
            'or a list of NumPy arrays.'
        )

    if isinstance(descriptors, list):
        if not isinstance(descriptors[0], np.ndarray):
            raise DescriptorException(
                'Please ensure descriptors are a list of NumPy arrays.'
            )

        # Bug fix: previously every element was compared against the rank of
        # the *first* element, so a list of (say) rank-3 arrays passed this
        # check despite the error message requiring rank 2. Enforce rank 2,
        # as documented.
        if not all(e.ndim == 2 for e in descriptors):
            raise DescriptorException(
                'Please ensure all elements of your descriptor list '
                'are of rank 2.'
            )

        dimensionality = descriptors[0].shape[1]
        if not all(e.shape[1] == dimensionality for e in descriptors):
            raise DescriptorException(
                'Please ensure all descriptors are of the same dimensionality.'
            )

    if not isinstance(n_modes, int) or n_modes <= 0:
        raise FisherVectorException('Please ensure n_modes is a positive integer.')

    # Bug fix: the original indexed gm_args['covariance_type'] unconditionally,
    # raising KeyError for any non-empty gm_args that did not set it. Use a
    # default lookup so only an explicit non-'diag' value is rejected.
    if gm_args and gm_args.get('covariance_type', 'diag') != 'diag':
        raise FisherVectorException('Covariance type must be "diag".')

    if isinstance(descriptors, list):
        descriptors = np.vstack(descriptors)

    # Copy gm_args so the caller's dict is not mutated, and force the
    # diagonal covariance required by the Fisher vector computation.
    gm_kwargs = dict(gm_args) if gm_args else {}
    gm_kwargs.setdefault('covariance_type', 'diag')
    gmm = GaussianMixture(n_components=n_modes, **gm_kwargs)
    gmm.fit(descriptors)

    return gmm
def fisher_vector(descriptors, gmm, *, improved=False, alpha=0.5):
    """Compute the Fisher vector given some descriptors/vectors,
    and an associated estimated GMM.

    Parameters
    ----------
    descriptors : np.ndarray, shape=(n_descriptors, descriptor_length)
        NumPy array of the descriptors for which the Fisher vector
        representation is to be computed.
    gmm : :class:`sklearn.mixture.GaussianMixture`
        An estimated GMM object, which contains the necessary parameters needed
        to compute the Fisher vector.
    improved : bool, default=False
        Flag denoting whether to compute improved Fisher vectors or not.
        Improved Fisher vectors are L2 and power normalized. Power
        normalization is simply f(z) = sign(z) pow(abs(z), alpha) for some
        0 <= alpha <= 1.
    alpha : float, default=0.5
        The parameter for the power normalization step. Ignored if
        improved=False.

    Returns
    -------
    fisher_vector : np.ndarray
        The computation Fisher vector, which is given by a concatenation of the
        gradients of a GMM with respect to its parameters (mixture weights,
        means, and covariance matrices). For D-dimensional input descriptors or
        vectors, and a K-mode GMM, the Fisher vector dimensionality will be
        2KD + K. Thus, its dimensionality is invariant to the number of
        descriptors/vectors.

    References
    ----------
    .. [1] Perronnin, F. and Dance, C. Fisher kernels on Visual Vocabularies
           for Image Categorization, IEEE Conference on Computer Vision and
           Pattern Recognition, 2007
    .. [2] Perronnin, F. and Sanchez, J. and Mensink T. Improving the Fisher
           Kernel for Large-Scale Image Classification, ECCV, 2010

    Examples
    --------
    .. testsetup::
        >>> import pytest; _ = pytest.importorskip('sklearn')

    >>> from skimage.feature import fisher_vector, learn_gmm
    >>> sift_for_images = [np.random.random((10, 128)) for _ in range(10)]
    >>> num_modes = 16
    >>> # Estimate 16-mode GMM with these synthetic SIFT vectors
    >>> gmm = learn_gmm(sift_for_images, n_modes=num_modes)
    >>> test_image_descriptors = np.random.random((25, 128))
    >>> # Compute the Fisher vector
    >>> fv = fisher_vector(test_image_descriptors, gmm)
    """
    try:
        from sklearn.mixture import GaussianMixture
    except ImportError:
        raise ImportError(
            'scikit-learn is not installed. Please ensure it is installed in '
            'order to use the Fisher vector functionality.'
        )

    if not isinstance(descriptors, np.ndarray):
        raise DescriptorException('Please ensure descriptors is a NumPy array.')

    if not isinstance(gmm, GaussianMixture):
        raise FisherVectorException(
            'Please ensure gmm is a sklearn.mixture.GaussianMixture object.'
        )

    if improved and not isinstance(alpha, float):
        raise FisherVectorException(
            'Please ensure that the alpha parameter is a float.'
        )

    num_descriptors = len(descriptors)
    mixture_weights = gmm.weights_
    means = gmm.means_
    covariances = gmm.covariances_
    # Soft assignment of each descriptor to each of the K modes,
    # shape (num_descriptors, K).
    posterior_probabilities = gmm.predict_proba(descriptors)

    # Statistics necessary to compute GMM gradients wrt its parameters
    pp_sum = posterior_probabilities.mean(axis=0, keepdims=True).T
    pp_x = posterior_probabilities.T.dot(descriptors) / num_descriptors
    pp_x_2 = posterior_probabilities.T.dot(np.power(descriptors, 2)) / num_descriptors

    # Compute GMM gradients wrt its parameters
    d_pi = pp_sum.squeeze() - mixture_weights
    d_mu = pp_x - pp_sum * means

    d_sigma_t1 = pp_sum * np.power(means, 2)
    d_sigma_t2 = pp_sum * covariances
    d_sigma_t3 = 2 * pp_x * means
    d_sigma = -pp_x_2 - d_sigma_t1 + d_sigma_t2 + d_sigma_t3

    # Apply analytical diagonal normalization
    sqrt_mixture_weights = np.sqrt(mixture_weights)
    d_pi /= sqrt_mixture_weights
    d_mu /= sqrt_mixture_weights[:, np.newaxis] * np.sqrt(covariances)
    d_sigma /= np.sqrt(2) * sqrt_mixture_weights[:, np.newaxis] * covariances

    # Concatenate GMM gradients to form Fisher vector representation
    # (K weight terms + K*D mean terms + K*D covariance terms).
    fisher_vector = np.hstack((d_pi, d_mu.ravel(), d_sigma.ravel()))

    if improved:
        # Power normalization followed by L2 normalization (see [2]).
        fisher_vector = np.sign(fisher_vector) * np.power(np.abs(fisher_vector), alpha)
        fisher_vector = fisher_vector / np.linalg.norm(fisher_vector)

    return fisher_vector
| DescriptorException |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/relu_op_test.py | {
"start": 1482,
"end": 7493
class ____(test.TestCase):
  """Tests for the Relu op: forward values across dtypes/devices, the qint8
  packed kernel's shape constraint, NaN propagation, and gradients."""

  def _npRelu(self, np_features):
    # NumPy reference implementation: elementwise max(x, 0).
    return np.maximum(np_features, np.zeros(np_features.shape))

  def testNpRelu(self):
    self.assertAllClose(
        np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
        self._npRelu(
            np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                     0.9]])))

  def _testRelu(self, np_features):
    # Compare the TF kernel against the NumPy reference on the same input.
    np_relu = self._npRelu(np_features)
    tf_relu = nn_ops.relu(np_features)
    self.assertAllClose(np_relu, tf_relu)
    self.assertShapeEqual(np_relu, tf_relu)

  def testNumbersCPU(self):
    for t in [
        np.int32, np.int64, np.float16, np.float32, np.float64,
        dtypes.bfloat16.as_numpy_dtype
    ]:
      # Force execution on CPU even if a GPU kernel is available for the type.
      with ops.device("/device:CPU:0"):
        self._testRelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    # Only the floating point types have GPU kernels.
    for t in [
        np.float16,
        np.float32,
        np.float64,
        dtypes.bfloat16.as_numpy_dtype,
    ]:
      self._testRelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

  def testReluInt8x4GoodShape(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest("No GPU available")
    # 8 elements: a multiple of 4, so the packed qint8 kernel accepts it.
    inputs = np.array([[-50, 7, 23, 0], [-1, -5, 6, 11]])
    np_relu = self._npRelu(inputs)
    tf_relu = nn_ops.relu(constant_op.constant(inputs, dtypes.qint8))
    self.assertAllClose(np_relu, tf_relu)
    self.assertShapeEqual(np_relu, tf_relu)

  @test_util.disable_xla("b/123338077")  # Passes with XLA
  def testReluInt8x4BadShape(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest("No GPU available")
    # 9 elements: not a multiple of 4, so the packed kernel must reject it.
    inputs = constant_op.constant(
        np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
      self.evaluate(nn_ops.relu(inputs))

    # 17 elements: also not a multiple of 4.
    inputs = constant_op.constant(
        np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
        dtypes.qint8)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
      self.evaluate(nn_ops.relu(inputs))

  def testNoElement(self):
    # Empty input should be handled without error.
    self._testRelu(np.array([[], []], dtype=np.float32))

  @test_util.disable_xla("b/157978028: Does not yet pass with XLA")
  def testNaNPropagation(self):
    # NaN inputs must propagate to the output (max(NaN, 0) is NaN here).
    for t in [np.float16, np.float32, np.float64]:
      self._testRelu(np.array([-1, np.nan, 1, np.nan]).astype(t))

  # The gradient test for ReLU is a bit tricky as the derivative is not well
  # defined at around zero and we want to avoid that in terms of input values.
  def testGradientFloat32(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(
          nn_ops.relu, [x], delta=1.0 / 1024))
    self.assertLess(err, 1e-6)

  # The gradient test for ReLU is a bit tricky as the derivative is not well
  # defined at around zero and we want to avoid that in terms of input values.
  def testGradientFloat16(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float16,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
    self.assertLess(err, 1e-6)

  def testGradientFloat64(self):
    with self.cached_session():
      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(
          nn_ops.relu, [x], delta=1.0 / 1024))
    self.assertLess(err, 1e-15)

  def testGradGradFloat32(self):
    with self.cached_session():

      # f computes the first-order gradient; checking f's gradient tests
      # the second-order (grad-of-grad) computation.
      def f(x):
        assert x.dtype == dtypes.float32
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.relu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024))
    self.assertLess(err, 1e-4)

  def testGradGradFloat64(self):
    with self.cached_session():

      def f(x):
        assert x.dtype == dtypes.float64
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.relu(x)
        return tape.gradient(y, x)

      x = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64,
          order="F")
      err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024))
    self.assertLess(err, 1e-10)

  def testGradientScalar(self):
    # One SGD step on loss = relu(x)^2 from x=100 with lr=0.25:
    # grad = 2 * 100 = 200, so x -> 100 - 0.25 * 200 = 50.
    x = variables.Variable(100.)

    def loss():
      return nn_ops.relu(x)**2

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(optimizer.minimize(loss))
    self.assertAllClose(x.read_value(), 50.0)

  def testGradientNoElement(self):
    with self.cached_session():

      def f(x):
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.relu(x)
        return tape.gradient(y, x)

      # Gradient of an empty tensor is an empty tensor of the same shape.
      x = np.asarray([[], []], dtype=np.float32)
      z = list(gradient_checker_v2.compute_gradient(f, [x]))[0][0]
      self.assertAllEqual(z, np.reshape(x, (0, 0)))
| ReluTest |
python | facebook__pyre-check | client/command_arguments.py | {
"start": 8791,
"end": 8915
class ____:
    """Holds the client-facing options of the `rage` command."""

    # Destination path for the rage report; None presumably means write to
    # stdout — confirm against the rage command implementation.
    output: Optional[Path] = None
    # Number of server logs to include; None presumably means no explicit
    # limit — confirm against the rage command implementation.
    server_log_count: Optional[int] = None
@dataclass(frozen=True)
| RageArguments |
python | realpython__materials | python-double-underscore/mangling.py | {
"start": 126,
"end": 661
class ____(A):
    """Subclass demonstrating that name mangling keeps double-underscore
    attributes and methods private to the defining class."""

    def __init__(self):
        super().__init__()
        # Mangled to self._B__attr, so it coexists with A's _A__attr.
        self.__attr = 1  # Doesn't override A.__attr

    def __method(self):  # Doesn't override A.__method()
        # self.__attr here resolves to the mangled _B__attr.
        print("B.__attr = ", self.__attr)
if __name__ == "__main__":
a = A()
b = B()
# Call the mangled methods
print(f"{a._A__method()=}")
print(f"{b._B__method()=}")
# Check attributes
print(f"{a.__dict__=}")
print(f"{b.__dict__=}")
# Access the attributes on b
print(f"{b._A__attr=}")
print(f"{b._B__attr=}")
| B |
python | docker__docker-py | docker/transport/sshconn.py | {
"start": 3181,
"end": 4545
class ____(urllib3.connectionpool.HTTPConnectionPool):
    """urllib3 connection pool whose connections are channels multiplexed
    over a single shared paramiko SSH transport instead of TCP sockets."""

    scheme = 'ssh'

    def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
        # 'localhost' is a placeholder pool key; the real endpoint is
        # reached through the SSH transport.
        super().__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.ssh_transport = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self):
        # Each connection is a fresh channel over the shared transport.
        return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)

    # When re-using connections, urllib3 calls fileno() on our
    # SSH channel instance, quickly overloading our fd limit. To avoid this,
    # we override _get_conn
    def _get_conn(self, timeout):
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as ae:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae

        except queue.Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                ) from None
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()
| SSHConnectionPool |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 167177,
"end": 168071
class ____:
    """Tests for how a task's cache_policy is derived from the @task
    keyword arguments (persist_result, result_storage_key, cache_policy)."""

    def test_cache_policy_init_to_none_when_not_persisting_results(self):
        @task(persist_result=False)
        def my_task():
            pass

        # With no result persistence there is nothing to cache.
        assert my_task.cache_policy is NO_CACHE

    def test_cache_policy_init_to_default_when_persisting_results(self):
        @task(persist_result=True)
        def my_task():
            pass

        assert my_task.cache_policy is DEFAULT

    def test_cache_policy_init_to_none_if_result_storage_key(self):
        # An explicit storage key overrides any cache policy.
        @task(result_storage_key="foo", persist_result=True)
        def my_task():
            pass

        assert my_task.cache_policy is None
        assert my_task.result_storage_key == "foo"

    def test_cache_policy_inits_as_expected(self):
        # An explicitly supplied policy is kept as-is.
        @task(cache_policy=TASK_SOURCE, persist_result=True)
        def my_task():
            pass

        assert my_task.cache_policy is TASK_SOURCE
| TestCachePolicies |
python | ray-project__ray | rllib/algorithms/tests/test_algorithm.py | {
"start": 1006,
"end": 23627
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
register_env("multi_cart", lambda cfg: MultiAgentCartPole(cfg))
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_add_module_and_remove_module(self):
config = (
ppo.PPOConfig()
.environment(
env="multi_cart",
env_config={"num_agents": 4},
)
.env_runners(num_cpus_per_env_runner=0.1)
.training(
train_batch_size=100,
minibatch_size=50,
num_epochs=1,
)
.rl_module(
model_config=DefaultModelConfig(
fcnet_hiddens=[5], fcnet_activation="linear"
),
)
.multi_agent(
# Start with a single policy.
policies={"p0"},
policy_mapping_fn=lambda *a, **kw: "p0",
# TODO (sven): Support object store caching on new API stack.
# # And only two policies that can be stored in memory at a
# # time.
# policy_map_capacity=2,
)
.evaluation(
evaluation_num_env_runners=1,
evaluation_config=ppo.PPOConfig.overrides(num_cpus_per_env_runner=0.1),
)
)
# Construct the Algorithm with a single policy in it.
algo = config.build()
mod0 = algo.get_module("p0")
r = algo.train()
self.assertTrue("p0" in r[LEARNER_RESULTS])
for i in range(1, 3):
def new_mapping_fn(agent_id, episode, i=i, **kwargs):
return f"p{choice([i, i - 1])}"
# Add a new RLModule by class (and options).
mid = f"p{i}"
print(f"Adding new RLModule {mid} ...")
new_marl_spec = algo.add_module(
module_id=mid,
module_spec=RLModuleSpec.from_module(mod0),
# Test changing the mapping fn.
new_agent_to_module_mapping_fn=new_mapping_fn,
# Change the list of modules to train.
new_should_module_be_updated=[f"p{i}", f"p{i-1}"],
)
new_module = algo.get_module(mid)
self._assert_modules_added(
algo=algo,
marl_spec=new_marl_spec,
mids=[0, i],
trainable=[i, i - 1],
mapped=[i, i - 1],
not_mapped=[i - 2],
)
# Assert new policy is part of local worker (eval worker set does NOT
# have a local worker, only the main EnvRunnerGroup does).
multi_rl_module = algo.env_runner.module
self.assertTrue(new_module is not mod0)
for j in range(i + 1):
self.assertTrue(f"p{j}" in multi_rl_module)
self.assertTrue(len(multi_rl_module) == i + 1)
algo.train()
checkpoint = algo.save_to_path()
# Test restoring from the checkpoint (which has more policies
# than what's defined in the config dict).
test = Algorithm.from_checkpoint(checkpoint)
self._assert_modules_added(
algo=test,
marl_spec=None,
mids=[0, i - 1, i],
trainable=[i - 1, i],
mapped=[i - 1, i],
not_mapped=[i - 2],
)
# Make sure algorithm can continue training the restored policy.
test.train()
# Test creating an inference action with the added (and restored) RLModule.
mod0 = test.get_module("p0")
out = mod0.forward_inference(
{
Columns.OBS: convert_to_tensor(
np.expand_dims(mod0.config.observation_space.sample(), 0),
framework=mod0.framework,
),
},
)
action_dist_inputs = out[Columns.ACTION_DIST_INPUTS]
self.assertTrue(action_dist_inputs.shape == (1, 2))
test.stop()
# After having added 2 Modules, try to restore the Algorithm,
# but only with 1 of the originally added Modules (plus the initial
# p0).
if i == 2:
def new_mapping_fn(agent_id, episode, **kwargs):
return f"p{choice([0, 2])}"
test2 = Algorithm.from_checkpoint(path=checkpoint)
test2.remove_module(
module_id="p1",
new_agent_to_module_mapping_fn=new_mapping_fn,
new_should_module_be_updated=["p0"],
)
self._assert_modules_added(
algo=test2,
marl_spec=None,
mids=[0, 2],
trainable=[0],
mapped=[0, 2],
not_mapped=[1, 4, 5, 6],
)
# Make sure algorithm can continue training the restored policy.
mod2 = test2.get_module("p2")
test2.train()
# Test creating an inference action with the added (and restored)
# RLModule.
out = mod2.forward_exploration(
{
Columns.OBS: convert_to_tensor(
np.expand_dims(mod0.config.observation_space.sample(), 0),
framework=mod0.framework,
),
},
)
action_dist_inputs = out[Columns.ACTION_DIST_INPUTS]
self.assertTrue(action_dist_inputs.shape == (1, 2))
test2.stop()
# Delete all added modules again from Algorithm.
for i in range(2, 0, -1):
mid = f"p{i}"
marl_spec = algo.remove_module(
mid,
# Note that the complete signature of a policy_mapping_fn
# is: `agent_id, episode, worker, **kwargs`.
new_agent_to_module_mapping_fn=(
lambda agent_id, episode, i=i, **kwargs: f"p{i - 1}"
),
# Update list of policies to train.
new_should_module_be_updated=[f"p{i - 1}"],
)
self._assert_modules_added(
algo=algo,
marl_spec=marl_spec,
mids=[0, i - 1],
trainable=[i - 1],
mapped=[i - 1],
not_mapped=[i, i + 1],
)
algo.stop()
@OldAPIStack
def test_add_policy_and_remove_policy(self):
    """Tests adding and removing policies on a live Algorithm (old API stack).

    Builds a multi-agent PPO Algorithm with one policy ("p0"), then:
    - adds two more policies, one by class (+options), one by pre-built
      instance,
    - checkpoints and restores with all, then only some, of the added
      policies,
    - finally removes the added policies again one by one and verifies the
      policy maps on all (remote, eval, and local) workers.
    """
    config = (
        ppo.PPOConfig()
        # This test targets the old API stack only.
        .api_stack(
            enable_env_runner_and_connector_v2=False,
            enable_rl_module_and_learner=False,
        )
        .environment(
            env=MultiAgentCartPole,
            env_config={
                "config": {
                    "num_agents": 4,
                },
            },
        )
        .env_runners(num_cpus_per_env_runner=0.1)
        .training(
            train_batch_size=100,
            minibatch_size=50,
            num_epochs=1,
            model={
                "fcnet_hiddens": [5],
                "fcnet_activation": "linear",
            },
        )
        .multi_agent(
            # Start with a single policy.
            policies={"p0"},
            policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: "p0",
            # And only two policies that can be stored in memory at a
            # time.
            policy_map_capacity=2,
        )
        .evaluation(
            evaluation_num_env_runners=1,
            evaluation_config=ppo.PPOConfig.overrides(num_cpus_per_env_runner=0.1),
        )
    )
    obs_space = gym.spaces.Box(-2.0, 2.0, (4,))
    act_space = gym.spaces.Discrete(2)

    # Pre-generate a policy instance to test adding these directly to an
    # existing algorithm.
    policy_obj = ppo.PPOTorchPolicy(obs_space, act_space, config.to_dict())

    # Construct the Algorithm with a single policy in it.
    algo = config.build()
    pol0 = algo.get_policy("p0")
    r = algo.train()
    self.assertTrue("p0" in r["info"][LEARNER_INFO])
    for i in range(1, 3):

        def new_mapping_fn(agent_id, episode, worker, i=i, **kwargs):
            # `i=i` binds the loop variable at definition time (avoids the
            # late-binding-closure pitfall).
            return f"p{choice([i, i - 1])}"

        # Add a new policy either by class (and options) or by instance.
        pid = f"p{i}"
        print(f"Adding policy {pid} ...")
        # By (already instantiated) instance.
        if i == 2:
            new_pol = algo.add_policy(
                pid,
                # Pass in an already existing policy instance.
                policy=policy_obj,
                # Test changing the mapping fn.
                policy_mapping_fn=new_mapping_fn,
                # Change the list of policies to train.
                policies_to_train=[f"p{i}", f"p{i - 1}"],
            )
        # By class (and options).
        else:
            new_pol = algo.add_policy(
                pid,
                algo.get_default_policy_class(config),
                observation_space=obs_space,
                action_space=act_space,
                # Test changing the mapping fn.
                policy_mapping_fn=new_mapping_fn,
                # Change the list of policies to train.
                policies_to_train=[f"p{i}", f"p{i-1}"],
            )

        # Make sure new policy is part of remote workers in the
        # worker set and the eval worker set.
        self.assertTrue(
            all(
                algo.env_runner_group.foreach_env_runner(
                    func=lambda w, pid=pid: pid in w.policy_map
                )
            )
        )
        self.assertTrue(
            all(
                algo.eval_env_runner_group.foreach_env_runner(
                    func=lambda w, pid=pid: pid in w.policy_map
                )
            )
        )
        # Assert new policy is part of local worker (eval worker set does NOT
        # have a local worker, only the main EnvRunnerGroup does).
        pol_map = algo.env_runner.policy_map
        self.assertTrue(new_pol is not pol0)
        for j in range(i + 1):
            self.assertTrue(f"p{j}" in pol_map)
        self.assertTrue(len(pol_map) == i + 1)
        algo.train()
        checkpoint = algo.save().checkpoint

        # Test restoring from the checkpoint (which has more policies
        # than what's defined in the config dict).
        test = ppo.PPO.from_checkpoint(checkpoint)

        # Make sure evaluation worker also got the restored, added policy.
        def _has_policies(w, pid=pid):
            return w.get_policy("p0") is not None and w.get_policy(pid) is not None

        self.assertTrue(
            all(test.eval_env_runner_group.foreach_env_runner(_has_policies))
        )

        # Make sure algorithm can continue training the restored policy.
        pol0 = test.get_policy("p0")
        test.train()
        # Test creating an action with the added (and restored) policy.
        a = test.compute_single_action(
            np.zeros_like(pol0.observation_space.sample()), policy_id=pid
        )
        self.assertTrue(pol0.action_space.contains(a))
        test.stop()

        # After having added 2 policies, try to restore the Algorithm,
        # but only with 1 of the originally added policies (plus the initial
        # p0).
        if i == 2:

            def new_mapping_fn(agent_id, episode, worker, **kwargs):
                return f"p{choice([0, 2])}"

            test2 = ppo.PPO.from_checkpoint(
                path=checkpoint,
                policy_ids=["p0", "p2"],
                policy_mapping_fn=new_mapping_fn,
                policies_to_train=["p0"],
            )

            # Make sure evaluation workers have the same policies.
            def _has_policies(w):
                return (
                    w.get_policy("p0") is not None
                    and w.get_policy("p2") is not None
                    and w.get_policy("p1") is None
                )

            self.assertTrue(
                all(test2.eval_env_runner_group.foreach_env_runner(_has_policies))
            )

            # Make sure algorithm can continue training the restored policy.
            pol2 = test2.get_policy("p2")
            test2.train()
            # Test creating an action with the added (and restored) policy.
            # NOTE(review): `pid` is "p2" here (i == 2), matching `pol2`.
            a = test2.compute_single_action(
                np.zeros_like(pol2.observation_space.sample()), policy_id=pid
            )
            self.assertTrue(pol2.action_space.contains(a))
            test2.stop()

    # Delete all added policies again from Algorithm.
    for i in range(2, 0, -1):
        pid = f"p{i}"
        algo.remove_policy(
            pid,
            # Note that the complete signature of a policy_mapping_fn
            # is: `agent_id, episode, worker, **kwargs`.
            policy_mapping_fn=(
                lambda agent_id, episode, worker, i=i, **kwargs: f"p{i - 1}"
            ),
            # Update list of policies to train.
            policies_to_train=[f"p{i - 1}"],
        )
        # Make sure removed policy is no longer part of remote workers in the
        # worker set and the eval worker set.
        self.assertTrue(
            algo.env_runner_group.foreach_env_runner(
                func=lambda w, pid=pid: pid not in w.policy_map
            )[0]
        )
        self.assertTrue(
            algo.eval_env_runner_group.foreach_env_runner(
                func=lambda w, pid=pid: pid not in w.policy_map
            )[0]
        )
        # Assert removed policy is no longer part of local worker
        # (eval worker set does NOT have a local worker, only the main
        # EnvRunnerGroup does).
        pol_map = algo.env_runner.policy_map
        self.assertTrue(pid not in pol_map)
        self.assertTrue(len(pol_map) == i)

    algo.stop()
def test_evaluation_option(self):
    """Checks that evaluation runs every `evaluation_interval` iterations.

    Uses a custom callback (AssertEvalCallback) that asserts the exact
    configured number of episodes is run per evaluation.
    """
    config = (
        dqn.DQNConfig()
        .environment(env="CartPole-v1")
        .evaluation(
            evaluation_interval=2,
            evaluation_duration=2,
            evaluation_duration_unit="episodes",
            evaluation_config=dqn.DQNConfig.overrides(gamma=0.98),
        )
        .callbacks(callbacks_class=AssertEvalCallback)
    )
    algo = config.build()
    # Run four training iterations and keep each result dict.
    iteration_results = []
    for _ in range(4):
        result = algo.train()
        print(result)
        iteration_results.append(result)
    algo.stop()

    first, second, third, fourth = iteration_results
    # No eval results yet in the first iteration (eval has not run yet);
    # once evaluation has run, its results stick to every later iteration.
    self.assertFalse(EVALUATION_RESULTS in first)
    for later_result in (second, third, fourth):
        self.assertTrue(EVALUATION_RESULTS in later_result)
    self.assertTrue(ENV_RUNNER_RESULTS in second[EVALUATION_RESULTS])
    self.assertTrue(
        EPISODE_RETURN_MEAN in second[EVALUATION_RESULTS][ENV_RUNNER_RESULTS]
    )
    # Evaluation metrics must have been refreshed between iterations 2 and 4.
    self.assertNotEqual(second[EVALUATION_RESULTS], fourth[EVALUATION_RESULTS])
def test_evaluation_option_always_attach_eval_metrics(self):
    """Checks that eval results, once produced, are attached to every result.

    Uses a custom callback (AssertEvalCallback) that asserts the exact
    configured number of episodes is run per evaluation.
    """
    config = (
        dqn.DQNConfig()
        .environment("CartPole-v1")
        .evaluation(
            evaluation_interval=2,
            evaluation_duration=2,
            evaluation_duration_unit="episodes",
            evaluation_config=dqn.DQNConfig.overrides(gamma=0.98),
        )
        .reporting(min_sample_timesteps_per_iteration=100)
        .callbacks(callbacks_class=AssertEvalCallback)
    )
    algo = config.build()
    # Collect four consecutive training results.
    iteration_results = [algo.train() for _ in range(4)]
    algo.stop()

    # Eval results are not available at step 0 (evaluation never ran).
    self.assertTrue(EVALUATION_RESULTS not in iteration_results[0])
    # Every later step carries eval results, even steps on which no eval
    # was run (the stack keeps attaching the latest eval results after the
    # very first evaluation).
    for later_result in iteration_results[1:]:
        self.assertTrue(EVALUATION_RESULTS in later_result)
def test_evaluation_wo_eval_env_runner_group(self):
    """Checks `evaluate()` behavior without a dedicated eval EnvRunnerGroup."""
    config = (
        ppo.PPOConfig()
        .environment(env="CartPole-v1")
        .callbacks(callbacks_class=AssertEvalCallback)
    )
    # Without an eval worker set AND without an env on the local worker,
    # calling evaluate() must fail.
    algo_without_local_env = config.build()
    with self.assertRaisesRegex(ValueError, "doesn't have an env!"):
        algo_without_local_env.evaluate()
    algo_without_local_env.stop()

    # Try again with `create_env_on_local_worker=True`.
    # This force-adds the env on the local worker, so this Algorithm can
    # `evaluate` even though it doesn't have an evaluation-worker set.
    config.create_env_on_local_worker = True
    algo_with_local_env = config.build()
    eval_results = algo_with_local_env.evaluate()
    assert (
        ENV_RUNNER_RESULTS in eval_results
        and EPISODE_RETURN_MEAN in eval_results[ENV_RUNNER_RESULTS]
    )
    algo_with_local_env.stop()
def test_no_env_but_eval_workers_do_have_env(self):
    """Tests whether no env on workers, but env on eval workers works ok.

    Trains BC purely from offline data (no env on the training workers)
    while the evaluation workers sample from a real CartPole env.
    """
    # Offline data file shipped with the repo, resolved relative to this
    # test module.
    this_file = Path(__file__)
    input_file = os.path.join(
        this_file.parent.parent.parent, "offline/tests/data/cartpole/small.json"
    )

    # Only used to supply the observation/action spaces (the training
    # workers themselves have no env).
    env = gym.make("CartPole-v1")

    offline_rl_config = (
        BCConfig()
        .api_stack(
            enable_rl_module_and_learner=False,
            enable_env_runner_and_connector_v2=False,
        )
        .environment(
            observation_space=env.observation_space,
            action_space=env.action_space,
        )
        .evaluation(
            evaluation_interval=1,
            evaluation_num_env_runners=1,
            evaluation_config=BCConfig.overrides(
                env="CartPole-v1",
                input_="sampler",
                observation_space=None,  # Test, whether this is inferred.
                action_space=None,  # Test, whether this is inferred.
            ),
        )
        .offline_data(input_=[input_file])
    )

    bc = offline_rl_config.build()
    bc.train()
    bc.stop()
def test_counters_after_checkpoint(self):
    """Checks that counters survive a state save/restore cycle.

    A fresh Algorithm instance restored from a saved state must NOT start
    its counters from zero again.
    """
    config = (
        ppo.PPOConfig()
        .api_stack(
            enable_rl_module_and_learner=False,
            enable_env_runner_and_connector_v2=False,
        )
        .environment(env="CartPole-v1")
    )
    algo = config.build()
    # A freshly built Algorithm starts with all counters at zero.
    self.assertTrue(all(value == 0 for value in algo._counters.values()))
    algo.step()
    # After one step, every counter must have moved off zero.
    self.assertTrue((all(value != 0 for value in algo._counters.values())))
    saved_counter_values = list(algo._counters.values())
    saved_state = algo.__getstate__()
    algo.stop()

    # A second fresh instance again starts at zero ...
    restored_algo = config.build()
    self.assertTrue(all(value == 0 for value in restored_algo._counters.values()))
    # ... but after restoring the saved state, the counters must match the
    # pre-checkpoint values exactly.
    restored_algo.__setstate__(saved_state)
    restored_counter_values = list(restored_algo._counters.values())
    self.assertEqual(saved_counter_values, restored_counter_values)
def _assert_modules_added(
    self,
    *,
    algo,
    marl_spec,
    mids,
    trainable,
    mapped,
    not_mapped,
):
    """Asserts `algo` carries exactly the expected RLModules.

    Args:
        algo: The Algorithm under test.
        marl_spec: Optional MultiRLModuleSpec returned by add/remove_module;
            if given, all `mids` must be present in it.
        mids: Module-index list; "p{i}" must exist on all EnvRunners.
        trainable: Module indices the Learner should be updating.
        mapped: Module indices the mapping fn may map agents to.
        not_mapped: Module indices the mapping fn must never produce.
    """
    # The Learner must train exactly the expected set of module IDs.
    self.assertEqual(
        set(algo.learner_group._learner.config.policies_to_train),
        {f"p{i}" for i in trainable},
    )

    # All expected module IDs must be present in the returned spec (if any).
    if marl_spec is not None:
        self.assertTrue(all(f"p{m}" in marl_spec for m in mids))

    # Every EnvRunner — in both the main and the eval EnvRunnerGroup — must
    # carry all expected modules.
    def _carries_all_modules(w, mids=mids):
        return all(f"p{i}" in w.module for i in mids)

    for runner_group in (algo.env_runner_group, algo.eval_env_runner_group):
        self.assertTrue(all(runner_group.foreach_env_runner(_carries_all_modules)))

    # Sample the (possibly stochastic) mapping fn many times and check which
    # module IDs it can / can never produce.
    sampled_module_ids = [
        algo.env_runner.config.policy_mapping_fn(0, None) for _ in range(100)
    ]
    self.assertTrue(all(f"p{i}" in sampled_module_ids for i in mapped))
    self.assertTrue(not any(f"p{i}" in sampled_module_ids for i in not_mapped))
# Allow running this test module directly (`python <this file>`); exits with
# pytest's return code.
if __name__ == "__main__":
    import sys

    import pytest

    sys.exit(pytest.main(["-v", __file__]))
| TestAlgorithm |
python | mwaskom__seaborn | tests/_core/test_properties.py | {
"start": 19162,
"end": 19366
} | class ____(IntervalBase):
prop = LineWidth
def test_rcparam_default(self):
with mpl.rc_context({"lines.linewidth": 2}):
assert self.prop().default_range == (1, 4)
| TestLineWidth |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.