language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | redis__redis-py | tests/test_credentials.py | {
"start": 4102,
"end": 8269
} | class ____:
@skip_if_redis_enterprise()
def test_only_pass_without_creds_provider(self, r, request):
# test for default user (`username` is supposed to be optional)
password = "password"
init_required_pass(r, request, password)
assert r.auth(password) is True
r2 = _get_client(redis.Redis, request, flushdb=False, password=password)
assert r2.ping() is True
@skip_if_redis_enterprise()
def test_user_and_pass_without_creds_provider(self, r, request):
"""
Test backward compatibility with username and password
"""
# test for other users
username = "username"
password = "password"
init_acl_user(r, request, username, password)
r2 = _get_client(
redis.Redis, request, flushdb=False, username=username, password=password
)
assert r2.ping() is True
@pytest.mark.parametrize("username", ["username", None])
@skip_if_redis_enterprise()
@pytest.mark.onlynoncluster
def test_credential_provider_with_supplier(self, r, request, username):
creds_provider = RandomAuthCredProvider(
user=username,
endpoint="localhost",
)
password = creds_provider.get_credentials()[-1]
if username:
init_acl_user(r, request, username, password)
else:
init_required_pass(r, request, password)
r2 = _get_client(
redis.Redis, request, flushdb=False, credential_provider=creds_provider
)
assert r2.ping() is True
def test_credential_provider_no_password_success(self, r, request):
init_acl_user(r, request, "username", "")
r2 = _get_client(
redis.Redis,
request,
flushdb=False,
credential_provider=NoPassCredProvider(),
)
assert r2.ping() is True
@pytest.mark.onlynoncluster
def test_credential_provider_no_password_error(self, r, request):
init_acl_user(r, request, "username", "password")
with pytest.raises(AuthenticationError) as e:
_get_client(
redis.Redis,
request,
flushdb=False,
credential_provider=NoPassCredProvider(),
)
assert e.match("invalid username-password")
@pytest.mark.onlynoncluster
def test_password_and_username_together_with_cred_provider_raise_error(
self, r, request
):
init_acl_user(r, request, "username", "password")
cred_provider = UsernamePasswordCredentialProvider(
username="username", password="password"
)
with pytest.raises(DataError) as e:
_get_client(
redis.Redis,
request,
flushdb=False,
username="username",
password="password",
credential_provider=cred_provider,
)
assert e.match(
"'username' and 'password' cannot be passed along with "
"'credential_provider'."
)
@pytest.mark.onlynoncluster
def test_change_username_password_on_existing_connection(self, r, request):
username = "origin_username"
password = "origin_password"
new_username = "new_username"
new_password = "new_password"
def teardown():
r.acl_deluser(new_username)
request.addfinalizer(teardown)
init_acl_user(r, request, username, password)
r2 = _get_client(
redis.Redis, request, flushdb=False, username=username, password=password
)
assert r2.ping() is True
conn = r2.connection_pool.get_connection()
conn.send_command("PING")
assert str_if_bytes(conn.read_response()) == "PONG"
assert conn.username == username
assert conn.password == password
init_acl_user(r, request, new_username, new_password)
conn.password = new_password
conn.username = new_username
conn.send_command("PING")
assert str_if_bytes(conn.read_response()) == "PONG"
| TestCredentialsProvider |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 18989,
"end": 19527
} | class ____(object):
"""Base class for self-rendered nodes."""
def __init__(self, desktop=AUTO, tablet=AUTO, mobile=AUTO):
assert desktop == AUTO or 1 <= desktop <= 12
self.desktop = desktop
assert tablet == AUTO or 1 <= tablet <= 8
self.tablet = tablet
assert mobile == AUTO or 1 <= mobile <= 4
self.mobile = mobile
def append(self, layout: FormLayout, form: forms.Form, root: ElementTree.Element):
raise NotImplementedError("Subclass should override this")
| LayoutNode |
python | django__django | tests/admin_views/admin.py | {
"start": 9655,
"end": 9738
} | class ____(admin.StackedInline):
model = BarAccount
extra = 1
| BarAccountAdmin |
python | mlflow__mlflow | tests/tracing/test_fluent.py | {
"start": 3959,
"end": 84823
} | class ____:
@mlflow.trace
def predict_stream(self, x):
for i in range(x):
yield self.some_operation_raise_error(i)
@mlflow.trace
def some_operation_raise_error(self, i):
if i >= 1:
raise ValueError("Some error")
return i
@pytest.fixture
def mock_client():
client = mock.MagicMock()
with mock.patch("mlflow.tracing.fluent.TracingClient", return_value=client):
yield client
@pytest.fixture
def mock_otel_trace_start_time():
# mock the start time of a trace, ensuring the root span has
# a smaller start time than child spans.
with mock.patch("opentelemetry.sdk.trace.time_ns", return_value=0):
yield
@pytest.mark.parametrize("with_active_run", [True, False])
@pytest.mark.parametrize("wrap_sync_func", [True, False])
def test_trace(wrap_sync_func, with_active_run, async_logging_enabled):
model = DefaultTestModel() if wrap_sync_func else DefaultAsyncTestModel()
if with_active_run:
if IS_TRACING_SDK_ONLY:
pytest.skip("Skipping test because mlflow or mlflow-skinny is not installed.")
with mlflow.start_run() as run:
model.predict(2, 5) if wrap_sync_func else asyncio.run(model.predict(2, 5))
run_id = run.info.run_id
else:
model.predict(2, 5) if wrap_sync_func else asyncio.run(model.predict(2, 5))
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
traces = get_traces()
assert len(traces) == 1
trace = traces[0]
assert trace.info.trace_id is not None
assert trace.info.experiment_id == _get_experiment_id()
assert trace.info.execution_time_ms >= 0.1 * 1e3 # at least 0.1 sec
assert trace.info.state == TraceState.OK
assert trace.info.request_metadata[TraceMetadataKey.INPUTS] == '{"x": 2, "y": 5}'
assert trace.info.request_metadata[TraceMetadataKey.OUTPUTS] == "64"
if with_active_run:
assert trace.info.request_metadata[TraceMetadataKey.SOURCE_RUN] == run_id
assert trace.data.request == '{"x": 2, "y": 5}'
assert trace.data.response == "64"
assert len(trace.data.spans) == 3
span_name_to_span = {span.name: span for span in trace.data.spans}
root_span = span_name_to_span["predict"]
# TODO: Trace info timestamp is not accurate because it is not adjusted to exclude the latency
# assert root_span.start_time_ns // 1e6 == trace.info.timestamp_ms
assert root_span.parent_id is None
assert root_span.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanFunctionName": "predict",
"mlflow.spanType": "UNKNOWN",
"mlflow.spanInputs": {"x": 2, "y": 5},
"mlflow.spanOutputs": 64,
}
child_span_1 = span_name_to_span["add_one_with_custom_name"]
assert child_span_1.parent_id == root_span.span_id
assert child_span_1.attributes == {
"delta": 1,
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanFunctionName": "add_one",
"mlflow.spanType": "LLM",
"mlflow.spanInputs": {"z": 7},
"mlflow.spanOutputs": 8,
}
child_span_2 = span_name_to_span["square"]
assert child_span_2.parent_id == root_span.span_id
assert child_span_2.start_time_ns <= child_span_2.end_time_ns - 0.1 * 1e6
assert child_span_2.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanFunctionName": "square",
"mlflow.spanType": "UNKNOWN",
"mlflow.spanInputs": {"t": 8},
"mlflow.spanOutputs": 64,
}
@pytest.mark.parametrize("wrap_sync_func", [True, False])
def test_trace_stream(wrap_sync_func):
model = StreamTestModel() if wrap_sync_func else AsyncStreamTestModel()
stream = model.predict_stream(1, 2)
# Trace should not be logged until the generator is consumed
assert get_traces() == []
# The span should not be set to active
# because the generator is not yet consumed
assert mlflow.get_current_active_span() is None
chunks = []
if wrap_sync_func:
for chunk in stream:
chunks.append(chunk)
# The `predict` span should not be active here.
assert mlflow.get_current_active_span() is None
else:
async def consume_stream():
async for chunk in stream:
chunks.append(chunk)
assert mlflow.get_current_active_span() is None
asyncio.run(consume_stream())
traces = get_traces()
assert len(traces) == 1
trace = traces[0]
assert trace.info.trace_id is not None
assert trace.info.experiment_id == _get_experiment_id()
assert trace.info.execution_time_ms >= 0.1 * 1e3 # at least 0.1 sec
assert trace.info.status == SpanStatusCode.OK
metadata = trace.info.request_metadata
assert metadata[TraceMetadataKey.INPUTS] == '{"x": 1, "y": 2}'
assert metadata[TraceMetadataKey.OUTPUTS] == "11" # sum of the outputs
assert len(trace.data.spans) == 5 # 1 root span + 3 square + 1 generate_numbers
root_span = trace.data.spans[0]
assert root_span.name == "predict_stream"
assert root_span.inputs == {"x": 1, "y": 2}
assert root_span.outputs == 11
assert len(root_span.events) == 9
assert root_span.events[0].name == "mlflow.chunk.item.0"
assert root_span.events[0].attributes == {"mlflow.chunk.value": "0"}
assert root_span.events[8].name == "mlflow.chunk.item.8"
# Spans for the chid 'square' function
for i in range(3):
assert trace.data.spans[i + 1].name == "square"
assert trace.data.spans[i + 1].inputs == {"t": i}
assert trace.data.spans[i + 1].outputs == i**2
assert trace.data.spans[i + 1].parent_id == root_span.span_id
# Span for the 'generate_numbers' function
assert trace.data.spans[4].name == "generate_numbers"
assert trace.data.spans[4].inputs == {"z": 3}
assert trace.data.spans[4].outputs == [0, 1, 2] # list of outputs
assert len(trace.data.spans[4].events) == 3
def test_trace_with_databricks_tracking_uri(databricks_tracking_uri, monkeypatch):
monkeypatch.setenv("MLFLOW_EXPERIMENT_NAME", "test")
monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, "bob")
monkeypatch.setattr(mlflow.tracking.context.default_context, "_get_source_name", lambda: "test")
model = DefaultTestModel()
mock_trace_info = mock.MagicMock()
mock_trace_info.trace_id = "123"
mock_trace_info.trace_location = mock.MagicMock()
mock_trace_info.trace_location.uc_schema = None
with (
mock.patch(
"mlflow.tracing.client.TracingClient._upload_trace_data"
) as mock_upload_trace_data,
mock.patch("mlflow.tracing.client._get_store") as mock_get_store,
):
mock_get_store().start_trace.return_value = mock_trace_info
model.predict(2, 5)
mlflow.flush_trace_async_logging(terminate=True)
mock_get_store().start_trace.assert_called_once()
mock_upload_trace_data.assert_called_once()
# NB: async logging should be no-op for model serving,
# but we test it here to make sure it doesn't break
@skip_when_testing_trace_sdk
def test_trace_in_databricks_model_serving(
mock_databricks_serving_with_tracing_env, async_logging_enabled
):
# Dummy flask app for prediction
import flask
from mlflow.pyfunc.context import Context, set_prediction_context
app = flask.Flask(__name__)
@app.route("/invocations", methods=["POST"])
def predict():
data = json.loads(flask.request.data.decode("utf-8"))
request_id = flask.request.headers.get("X-Request-ID")
with set_prediction_context(Context(request_id=request_id)):
prediction = TestModel().predict(**data)
trace = pop_trace(request_id=request_id)
result = json.dumps(
{
"prediction": prediction,
"trace": trace,
},
default=str,
)
return flask.Response(response=result, status=200, mimetype="application/json")
class TestModel:
@mlflow.trace()
def predict(self, x, y):
z = x + y
z = self.add_one(z)
with mlflow.start_span(name="square") as span:
z = self.square(z)
span.add_event(SpanEvent("event", 0, attributes={"foo": "bar"}))
return z
@mlflow.trace(span_type=SpanType.LLM, name="custom", attributes={"delta": 1})
def add_one(self, z):
return z + 1
def square(self, t):
return t**2
# Mimic scoring request
databricks_request_id = "request-12345"
response = app.test_client().post(
"/invocations",
headers={"X-Request-ID": databricks_request_id},
data=json.dumps({"x": 2, "y": 5}),
)
assert response.status_code == 200
assert response.json["prediction"] == 64
trace_dict = response.json["trace"]
trace = Trace.from_dict(trace_dict)
assert trace.info.trace_id.startswith("tr-")
assert trace.info.client_request_id == databricks_request_id
assert trace.info.request_metadata[TRACE_SCHEMA_VERSION_KEY] == "3"
assert len(trace.data.spans) == 3
span_name_to_span = {span.name: span for span in trace.data.spans}
root_span = span_name_to_span["predict"]
assert isinstance(root_span._trace_id, str)
assert isinstance(root_span.span_id, str)
assert isinstance(root_span.start_time_ns, int)
assert isinstance(root_span.end_time_ns, int)
assert root_span.status.status_code.value == "OK"
assert root_span.status.description == ""
assert root_span.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": SpanType.UNKNOWN,
"mlflow.spanFunctionName": "predict",
"mlflow.spanInputs": {"x": 2, "y": 5},
"mlflow.spanOutputs": 64,
}
assert root_span.events == []
child_span_1 = span_name_to_span["custom"]
assert child_span_1.parent_id == root_span.span_id
assert child_span_1.attributes == {
"delta": 1,
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": SpanType.LLM,
"mlflow.spanFunctionName": "add_one",
"mlflow.spanInputs": {"z": 7},
"mlflow.spanOutputs": 8,
}
assert child_span_1.events == []
child_span_2 = span_name_to_span["square"]
assert child_span_2.parent_id == root_span.span_id
assert child_span_2.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": SpanType.UNKNOWN,
}
assert asdict(child_span_2.events[0]) == {
"name": "event",
"timestamp": 0,
"attributes": {"foo": "bar"},
}
# The trace should be removed from the buffer after being retrieved
assert pop_trace(request_id=databricks_request_id) is None
# In model serving, the traces should not be stored in the fluent API buffer
traces = get_traces()
assert len(traces) == 0
@skip_when_testing_trace_sdk
def test_trace_in_model_evaluation(monkeypatch, async_logging_enabled):
from mlflow.pyfunc.context import Context, set_prediction_context
monkeypatch.setenv(MLFLOW_TRACKING_USERNAME.name, "bob")
monkeypatch.setattr(mlflow.tracking.context.default_context, "_get_source_name", lambda: "test")
class TestModel:
@mlflow.trace()
def predict(self, x, y):
return x + y
model = TestModel()
# mock _upload_trace_data to avoid generating trace data file
with mlflow.start_run() as run:
run_id = run.info.run_id
request_id_1 = "tr-eval-123"
with set_prediction_context(Context(request_id=request_id_1, is_evaluate=True)):
model.predict(1, 2)
request_id_2 = "tr-eval-456"
with set_prediction_context(Context(request_id=request_id_2, is_evaluate=True)):
model.predict(3, 4)
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
trace = mlflow.get_trace(request_id_1)
assert trace.info.request_metadata[TraceMetadataKey.SOURCE_RUN] == run_id
assert trace.info.tags[TraceTagKey.EVAL_REQUEST_ID] == request_id_1
trace = mlflow.get_trace(request_id_2)
assert trace.info.request_metadata[TraceMetadataKey.SOURCE_RUN] == run_id
assert trace.info.tags[TraceTagKey.EVAL_REQUEST_ID] == request_id_2
@pytest.mark.parametrize("sync", [True, False])
def test_trace_handle_exception_during_prediction(sync):
# This test is to make sure that the exception raised by the main prediction
# logic is raised properly and the trace is still logged.
model = ErroringTestModel() if sync else ErroringAsyncTestModel()
with pytest.raises(ValueError, match=r"Some error"):
model.predict(2, 5) if sync else asyncio.run(model.predict(2, 5))
# Trace should be logged even if the function fails, with status code ERROR
trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
assert trace.info.trace_id is not None
assert trace.info.state == TraceState.ERROR
assert trace.info.request_metadata[TraceMetadataKey.INPUTS] == '{"x": 2, "y": 5}'
assert trace.info.request_metadata[TraceMetadataKey.OUTPUTS] == ""
assert trace.data.request == '{"x": 2, "y": 5}'
assert trace.data.response is None
assert len(trace.data.spans) == 2
def test_trace_handle_exception_during_streaming():
model = ErroringStreamTestModel()
stream = model.predict_stream(2)
chunks = []
with pytest.raises(ValueError, match=r"Some error"): # noqa: PT012
for chunk in stream:
chunks.append(chunk)
# The test model raises an error after the first chunk
assert len(chunks) == 1
traces = get_traces()
assert len(traces) == 1
trace = traces[0]
assert trace.info.state == TraceState.ERROR
assert trace.info.request_metadata[TraceMetadataKey.INPUTS] == '{"x": 2}'
# The test model is expected to produce three spans
# 1. Root span (error - inherited from the child)
# 2. First chunk span (OK)
# 3. Second chunk span (error)
spans = trace.data.spans
assert len(spans) == 3
assert spans[0].name == "predict_stream"
assert spans[0].status.status_code == SpanStatusCode.ERROR
assert spans[1].name == "some_operation_raise_error"
assert spans[1].status.status_code == SpanStatusCode.OK
assert spans[2].name == "some_operation_raise_error"
assert spans[2].status.status_code == SpanStatusCode.ERROR
# One chunk event + one exception event
assert len(spans[0].events) == 2
assert spans[0].events[0].name == "mlflow.chunk.item.0"
assert spans[0].events[1].name == "exception"
@pytest.mark.parametrize(
"model",
[
DefaultTestModel(),
DefaultAsyncTestModel(),
StreamTestModel(),
AsyncStreamTestModel(),
],
)
def test_trace_ignore_exception(monkeypatch, model):
# This test is to make sure that the main prediction logic is not affected
# by the exception raised by the tracing logic.
def _call_model_and_assert_output(model):
if isinstance(model, DefaultTestModel):
output = model.predict(2, 5)
assert output == 64
elif isinstance(model, DefaultAsyncTestModel):
output = asyncio.run(model.predict(2, 5))
assert output == 64
elif isinstance(model, StreamTestModel):
stream = model.predict_stream(2, 5)
assert len(list(stream)) == 21
elif isinstance(model, AsyncStreamTestModel):
astream = model.predict_stream(2, 5)
async def _consume_stream():
return [chunk async for chunk in astream]
stream = asyncio.run(_consume_stream())
assert len(list(stream)) == 21
else:
raise ValueError("Unknown model type")
# Exception during starting span: trace should not be logged.
with mock.patch("mlflow.tracing.provider._get_tracer", side_effect=ValueError("Some error")):
_call_model_and_assert_output(model)
assert get_traces() == []
# Exception during ending span: trace should not be logged.
tracer = _get_tracer(__name__)
def _always_fail(*args, **kwargs):
raise ValueError("Some error")
monkeypatch.setattr(tracer.span_processor, "on_end", _always_fail)
_call_model_and_assert_output(model)
assert len(get_traces()) == 0
def test_trace_skip_resolving_unrelated_tags_to_traces():
with mock.patch("mlflow.tracking.context.registry.DatabricksRepoRunContext") as mock_context:
mock_context.in_context.return_value = ["unrelated tags"]
model = DefaultTestModel()
model.predict(2, 5)
trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
assert "unrelated tags" not in trace.info.tags
# Tracing SDK doesn't have `create_experiment` support
@skip_when_testing_trace_sdk
def test_trace_with_experiment_id():
exp_1 = mlflow.create_experiment("exp_1")
exp_2 = mlflow.set_experiment("exp_2").experiment_id # active experiment
@mlflow.trace(trace_destination=MlflowExperiment(exp_1))
def predict_1():
with mlflow.start_span(name="child_span"):
return
@mlflow.trace()
def predict_2():
pass
predict_1()
traces = get_traces(experiment_id=exp_1)
assert len(traces) == 1
assert traces[0].info.experiment_id == exp_1
assert len(traces[0].data.spans) == 2
assert get_traces(experiment_id=exp_2) == []
predict_2()
traces = get_traces(experiment_id=exp_2)
assert len(traces) == 1
assert traces[0].info.experiment_id == exp_2
# Tracing SDK doesn't have `create_experiment` support
@skip_when_testing_trace_sdk
def test_trace_with_experiment_id_issue_warning_when_not_root_span():
exp_1 = mlflow.create_experiment("exp_1")
@mlflow.trace(trace_destination=MlflowExperiment(exp_1))
def predict_1():
return predict_2()
@mlflow.trace(trace_destination=MlflowExperiment(exp_1))
def predict_2():
return
with mock.patch("mlflow.tracing.provider._logger") as mock_logger:
predict_1()
assert mock_logger.warning.call_count == 1
assert mock_logger.warning.call_args[0][0] == (
"The `experiment_id` parameter can only be used for root spans, but the span "
"`predict_2` is not a root span. The specified value `1` will be ignored."
)
def test_start_span_context_manager(async_logging_enabled):
datetime_now = datetime.now()
class TestModel:
def predict(self, x, y):
with mlflow.start_span(name="root_span") as root_span:
root_span.set_inputs({"x": x, "y": y})
z = x + y
with mlflow.start_span(name="child_span", span_type=SpanType.LLM) as child_span:
child_span.set_inputs(z)
z = z + 2
child_span.set_outputs(z)
child_span.set_attributes({"delta": 2, "time": datetime_now})
res = self.square(z)
root_span.set_outputs(res)
return res
def square(self, t):
with mlflow.start_span(name="child_span") as span:
span.set_inputs({"t": t})
res = t**2
time.sleep(0.1)
span.set_outputs(res)
return res
model = TestModel()
model.predict(1, 2)
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
traces = get_traces()
assert len(traces) == 1
trace = traces[0]
assert trace.info.trace_id is not None
assert trace.info.experiment_id == _get_experiment_id()
assert trace.info.execution_time_ms >= 0.1 * 1e3 # at least 0.1 sec
assert trace.info.state == TraceState.OK
assert trace.info.request_metadata[TraceMetadataKey.INPUTS] == '{"x": 1, "y": 2}'
assert trace.info.request_metadata[TraceMetadataKey.OUTPUTS] == "25"
assert trace.data.request == '{"x": 1, "y": 2}'
assert trace.data.response == "25"
assert len(trace.data.spans) == 3
root_span = trace.data.spans[0]
assert root_span.name == "root_span"
assert root_span.parent_id is None
assert root_span.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": "UNKNOWN",
"mlflow.spanInputs": {"x": 1, "y": 2},
"mlflow.spanOutputs": 25,
}
child_span_1 = trace.data.spans[1]
assert child_span_1.name == "child_span"
assert child_span_1.parent_id == root_span.span_id
assert child_span_1.attributes == {
"delta": 2,
"time": str(datetime_now),
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": "LLM",
"mlflow.spanInputs": 3,
"mlflow.spanOutputs": 5,
}
child_span_2 = trace.data.spans[2]
assert child_span_2.name == "child_span"
assert child_span_2.parent_id == root_span.span_id
assert child_span_2.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": "UNKNOWN",
"mlflow.spanInputs": {"t": 5},
"mlflow.spanOutputs": 25,
}
assert child_span_2.start_time_ns <= child_span_2.end_time_ns - 0.1 * 1e6
def test_start_span_context_manager_with_imperative_apis(async_logging_enabled):
# This test is to make sure that the spans created with fluent APIs and imperative APIs
# (via MLflow client) are correctly linked together. This usage is not recommended but
# should be supported for the advanced use cases like using LangChain callbacks as a
# part of broader tracing.
class TestModel:
def predict(self, x, y):
with mlflow.start_span(name="root_span") as root_span:
root_span.set_inputs({"x": x, "y": y})
z = x + y
child_span = start_span_no_context(
name="child_span_1",
span_type=SpanType.LLM,
parent_span=root_span,
)
child_span.set_inputs(z)
z = z + 2
time.sleep(0.1)
child_span.set_outputs(z)
child_span.set_attributes({"delta": 2})
child_span.end()
root_span.set_outputs(z)
return z
model = TestModel()
model.predict(1, 2)
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
traces = get_traces()
assert len(traces) == 1
trace = traces[0]
assert trace.info.trace_id is not None
assert trace.info.experiment_id == _get_experiment_id()
assert trace.info.execution_time_ms >= 0.1 * 1e3 # at least 0.1 sec
assert trace.info.state == TraceState.OK
assert trace.info.request_metadata[TraceMetadataKey.INPUTS] == '{"x": 1, "y": 2}'
assert trace.info.request_metadata[TraceMetadataKey.OUTPUTS] == "5"
assert trace.data.request == '{"x": 1, "y": 2}'
assert trace.data.response == "5"
assert len(trace.data.spans) == 2
span_name_to_span = {span.name: span for span in trace.data.spans}
root_span = span_name_to_span["root_span"]
assert root_span.parent_id is None
assert root_span.attributes == {
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": "UNKNOWN",
"mlflow.spanInputs": {"x": 1, "y": 2},
"mlflow.spanOutputs": 5,
}
child_span_1 = span_name_to_span["child_span_1"]
assert child_span_1.parent_id == root_span.span_id
assert child_span_1.attributes == {
"delta": 2,
"mlflow.traceRequestId": trace.info.trace_id,
"mlflow.spanType": "LLM",
"mlflow.spanInputs": 3,
"mlflow.spanOutputs": 5,
}
def test_mlflow_trace_isolated_from_other_otel_processors():
# Set up non-MLFlow tracer
import opentelemetry.sdk.trace as trace_sdk
from opentelemetry import trace
class MockOtelExporter(trace_sdk.export.SpanExporter):
def __init__(self):
self.exported_spans = []
def export(self, spans):
self.exported_spans.extend(spans)
other_exporter = MockOtelExporter()
provider = trace_sdk.TracerProvider()
processor = trace_sdk.export.SimpleSpanProcessor(other_exporter)
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)
# Create MLflow trace
with mlflow.start_span(name="mlflow_span"):
pass
# Create non-MLflow trace
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("non_mlflow_span"):
pass
# MLflow only processes spans created with MLflow APIs
assert len(get_traces()) == 1
assert mlflow.get_trace(mlflow.get_last_active_trace_id()).data.spans[0].name == "mlflow_span"
# Other spans are processed by the other processor
assert len(other_exporter.exported_spans) == 1
assert other_exporter.exported_spans[0].name == "non_mlflow_span"
def test_get_trace():
with mock.patch("mlflow.tracing.display.get_display_handler") as mock_get_display_handler:
model = DefaultTestModel()
model.predict(2, 5)
trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
trace_id = trace.info.trace_id
mock_get_display_handler.reset_mock()
# Fetch trace from in-memory buffer
trace_in_memory = mlflow.get_trace(trace_id)
assert trace.info.trace_id == trace_in_memory.info.trace_id
mock_get_display_handler.assert_not_called()
# Fetch trace from backend
trace_from_backend = mlflow.get_trace(trace.info.trace_id)
assert trace.info.trace_id == trace_from_backend.info.trace_id
mock_get_display_handler.assert_not_called()
# If not found, return None with warning
with mock.patch("mlflow.tracing.fluent._logger") as mock_logger:
assert mlflow.get_trace("not_found") is None
mock_logger.warning.assert_called_once()
def test_test_search_traces_empty(mock_client):
mock_client.search_traces.return_value = PagedList([], token=None)
traces = mlflow.search_traces()
assert len(traces) == 0
if not IS_TRACING_SDK_ONLY:
default_columns = Trace.pandas_dataframe_columns()
assert traces.columns.tolist() == default_columns
traces = mlflow.search_traces(extract_fields=["foo.inputs.bar"])
assert traces.columns.tolist() == [*default_columns, "foo.inputs.bar"]
mock_client.search_traces.assert_called()
@pytest.mark.parametrize("return_type", ["pandas", "list"])
def test_search_traces(return_type, mock_client):
if return_type == "pandas" and IS_TRACING_SDK_ONLY:
pytest.skip("Skipping test because mlflow or mlflow-skinny is not installed.")
mock_client.search_traces.return_value = PagedList(
[
Trace(
info=create_test_trace_info(f"tr-{i}"),
data=TraceData([]),
)
for i in range(10)
],
token=None,
)
traces = mlflow.search_traces(
experiment_ids=["1"],
filter_string="name = 'foo'",
max_results=10,
order_by=["timestamp DESC"],
return_type=return_type,
)
if return_type == "pandas":
import pandas as pd
assert isinstance(traces, pd.DataFrame)
else:
assert isinstance(traces, list)
assert all(isinstance(trace, Trace) for trace in traces)
assert len(traces) == 10
mock_client.search_traces.assert_called_once_with(
experiment_ids=None,
run_id=None,
filter_string="name = 'foo'",
max_results=10,
order_by=["timestamp DESC"],
page_token=None,
model_id=None,
include_spans=True,
locations=["1"],
)
def test_search_traces_invalid_return_types(mock_client):
with pytest.raises(MlflowException, match=r"Invalid return type"):
mlflow.search_traces(return_type="invalid")
with pytest.raises(MlflowException, match=r"The `extract_fields`"):
mlflow.search_traces(extract_fields=["foo.inputs.bar"], return_type="list")
def test_search_traces_validates_experiment_ids_type():
with pytest.raises(MlflowException, match=r"locations must be a list"):
mlflow.search_traces(locations=4)
with pytest.raises(MlflowException, match=r"locations must be a list"):
mlflow.search_traces(locations="4")
def test_search_traces_with_pagination(mock_client):
traces = [
Trace(
info=create_test_trace_info(f"tr-{i}"),
data=TraceData([]),
)
for i in range(30)
]
mock_client.search_traces.side_effect = [
PagedList(traces[:10], token="token-1"),
PagedList(traces[10:20], token="token-2"),
PagedList(traces[20:], token=None),
]
traces = mlflow.search_traces(experiment_ids=["1"])
assert len(traces) == 30
common_args = {
"experiment_ids": None,
"run_id": None,
"max_results": SEARCH_TRACES_DEFAULT_MAX_RESULTS,
"filter_string": None,
"order_by": None,
"include_spans": True,
"model_id": None,
"locations": ["1"],
}
mock_client.search_traces.assert_has_calls(
[
mock.call(**common_args, page_token=None),
mock.call(**common_args, page_token="token-1"),
mock.call(**common_args, page_token="token-2"),
]
)
def test_search_traces_with_default_experiment_id(mock_client):
mock_client.search_traces.return_value = PagedList([], token=None)
with mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value="123"):
mlflow.search_traces()
mock_client.search_traces.assert_called_once_with(
experiment_ids=None,
run_id=None,
filter_string=None,
max_results=SEARCH_TRACES_DEFAULT_MAX_RESULTS,
order_by=None,
page_token=None,
model_id=None,
include_spans=True,
locations=["123"],
)
@skip_when_testing_trace_sdk
def test_search_traces_yields_expected_dataframe_contents(monkeypatch):
model = DefaultTestModel()
expected_traces = []
for _ in range(10):
model.predict(2, 5)
time.sleep(0.1)
trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
expected_traces.append(trace)
df = mlflow.search_traces(max_results=10, order_by=["timestamp ASC"])
assert df.columns.tolist() == [
"trace_id",
"trace",
"client_request_id",
"state",
"request_time",
"execution_duration",
"request",
"response",
"trace_metadata",
"tags",
"spans",
"assessments",
]
for idx, trace in enumerate(expected_traces):
assert df.iloc[idx].trace_id == trace.info.trace_id
assert Trace.from_json(df.iloc[idx].trace).info.trace_id == trace.info.trace_id
assert df.iloc[idx].client_request_id == trace.info.client_request_id
assert df.iloc[idx].state == trace.info.state
assert df.iloc[idx].request_time == trace.info.request_time
assert df.iloc[idx].execution_duration == trace.info.execution_duration
assert df.iloc[idx].request == json.loads(trace.data.request)
assert df.iloc[idx].response == json.loads(trace.data.response)
assert df.iloc[idx].trace_metadata == trace.info.trace_metadata
assert df.iloc[idx].spans == [s.to_dict() for s in trace.data.spans]
assert df.iloc[idx].tags == trace.info.tags
assert df.iloc[idx].assessments == trace.info.assessments
@skip_when_testing_trace_sdk
def test_search_traces_handles_missing_response_tags_and_metadata(mock_client):
mock_client.search_traces.return_value = PagedList(
[
Trace(
info=TraceInfo(
trace_id="5",
trace_location=TraceLocation.from_experiment_id("test"),
request_time=1,
execution_duration=2,
state=TraceState.OK,
),
data=TraceData(spans=[]),
)
],
token=None,
)
df = mlflow.search_traces()
assert df["response"].isnull().all()
assert df["tags"].tolist() == [{}]
assert df["trace_metadata"].tolist() == [{}]
@skip_when_testing_trace_sdk
def test_search_traces_extracts_fields_as_expected():
    """extract_fields should pull span inputs/outputs into dedicated columns."""
    DefaultTestModel().predict(2, 5)
    fields = [
        "predict.inputs.x",
        "predict.outputs",
        "add_one_with_custom_name.inputs.z",
    ]
    traces_df = mlflow.search_traces(extract_fields=fields)
    assert traces_df["predict.inputs.x"].tolist() == [2]
    assert traces_df["predict.outputs"].tolist() == [64]
    assert traces_df["add_one_with_custom_name.inputs.z"].tolist() == [7]
# Edge cases: no span has an input/output matching the requested name, or
# a span has inputs but we request its (missing) outputs.
@skip_when_testing_trace_sdk
def test_search_traces_with_input_and_no_output():
    """A span with inputs but no outputs yields a null outputs column."""
    with mlflow.start_span(name="with_input_and_no_output") as span:
        span.set_inputs({"a": 1})

    fields = ["with_input_and_no_output.inputs.a", "with_input_and_no_output.outputs"]
    result = mlflow.search_traces(extract_fields=fields)
    assert result["with_input_and_no_output.inputs.a"].tolist() == [1]
    assert result["with_input_and_no_output.outputs"].isnull().all()
@skip_when_testing_trace_sdk
def test_search_traces_with_non_dict_span_inputs_outputs():
    """Non-dict inputs/outputs are extractable whole; keyed access yields nulls."""
    with mlflow.start_span(name="non_dict_span") as span:
        span.set_inputs(["a", "b"])
        span.set_outputs([1, 2, 3])

    fields = ["non_dict_span.inputs", "non_dict_span.outputs", "non_dict_span.inputs.x"]
    result = mlflow.search_traces(extract_fields=fields)
    assert result["non_dict_span.inputs"].tolist() == [["a", "b"]]
    assert result["non_dict_span.outputs"].tolist() == [[1, 2, 3]]
    assert result["non_dict_span.inputs.x"].isnull().all()
@skip_when_testing_trace_sdk
def test_search_traces_extract_fields_preserves_standard_columns():
    """extract_fields adds columns without dropping the standard ones."""
    with mlflow.start_span(name="test_span") as span:
        span.set_inputs({"x": 1})
        span.set_outputs({"y": 2})

    result = mlflow.search_traces(extract_fields=["test_span.inputs.x"])

    # Standard columns must survive field extraction.
    for column in ("trace_id", "spans", "tags", "request", "response"):
        assert column in result.columns

    # The requested field is appended as its own column.
    assert "test_span.inputs.x" in result.columns
    assert result["test_span.inputs.x"].tolist() == [1]
@skip_when_testing_trace_sdk
def test_search_traces_with_multiple_spans_with_same_name():
    """Field extraction when several spans share a name: fields present on more
    than one of the duplicates come back null, while a field unique to one of
    them is still extracted."""
    class TestModel:
        # Both `predict` and `add_one` are traced under the name "duplicate_name".
        @mlflow.trace(name="duplicate_name")
        def predict(self, x, y):
            z = x + y
            z = self.add_one(z)
            z = mlflow.trace(self.square)(z)
            return z  # noqa: RET504
        @mlflow.trace(span_type=SpanType.LLM, name="duplicate_name", attributes={"delta": 1})
        def add_one(self, z):
            return z + 1
        def square(self, t):
            res = t**2
            time.sleep(0.1)
            return res
    model = TestModel()
    model.predict(2, 5)
    df = mlflow.search_traces(
        extract_fields=[
            "duplicate_name.inputs.x",
            "duplicate_name.inputs.y",
            "duplicate_name.inputs.z",
        ]
    )
    # Duplicate spans would all be null
    # (x/y are ambiguous under the duplicated name; z exists only on `add_one`,
    # which predict called with 2 + 5 = 7)
    assert df["duplicate_name.inputs.x"].isnull().all()
    assert df["duplicate_name.inputs.y"].isnull().all()
    assert df["duplicate_name.inputs.z"].tolist() == [7]
# Test a field that doesn't exist for extraction - we shouldn't throw, just return empty column
@skip_when_testing_trace_sdk
def test_search_traces_with_non_existent_field():
    """Requesting a field no span has must not raise; it yields a null column."""
    DefaultTestModel().predict(2, 5)
    result = mlflow.search_traces(
        extract_fields=[
            "predict.inputs.k",
            "predict.inputs.x",
            "predict.outputs",
            "add_one_with_custom_name.inputs.z",
        ]
    )
    # The missing key comes back as an all-null column; real fields are intact.
    assert result["predict.inputs.k"].isnull().all()
    assert result["predict.inputs.x"].tolist() == [2]
    assert result["predict.outputs"].tolist() == [64]
    assert result["add_one_with_custom_name.inputs.z"].tolist() == [7]
@skip_when_testing_trace_sdk
def test_search_traces_span_and_field_name_with_dot():
    """Backtick-quoted span/field names containing dots resolve correctly."""
    with mlflow.start_span(name="span.name") as span:
        span.set_inputs({"a.b": 0})
        span.set_outputs({"x.y": 1})

    result = mlflow.search_traces(
        extract_fields=[
            "`span.name`.inputs",
            "`span.name`.inputs.`a.b`",
            "`span.name`.outputs",
            "`span.name`.outputs.`x.y`",
        ]
    )
    # Backticks are stripped from the resulting column names.
    assert result["span.name.inputs"].tolist() == [{"a.b": 0}]
    assert result["span.name.inputs.a.b"].tolist() == [0]
    assert result["span.name.outputs"].tolist() == [{"x.y": 1}]
    assert result["span.name.outputs.x.y"].tolist() == [1]
@skip_when_testing_trace_sdk
def test_search_traces_with_run_id():
    """search_traces(run_id=...) scopes results to traces created in that run,
    composes with filter_string, and rejects conflicting filters."""
    def _create_trace(name, tags=None):
        # Helper: start a one-span trace and apply any tags to it.
        with mlflow.start_span(name=name) as span:
            for k, v in (tags or {}).items():
                mlflow.set_trace_tag(trace_id=span.request_id, key=k, value=v)
        return span.request_id
    def _get_names(traces):
        # Pull the trace-name tag out of each result row.
        tags = traces["tags"].tolist()
        return [tags[i].get(TraceTagKey.TRACE_NAME) for i in range(len(tags))]
    with mlflow.start_run() as run1:
        _create_trace(name="tr-1")
        _create_trace(name="tr-2", tags={"fruit": "apple"})
    with mlflow.start_run() as run2:
        _create_trace(name="tr-3")
        _create_trace(name="tr-4", tags={"fruit": "banana"})
        _create_trace(name="tr-5", tags={"fruit": "apple"})
    # No run filter: all five traces, newest first.
    traces = mlflow.search_traces()
    assert _get_names(traces) == ["tr-5", "tr-4", "tr-3", "tr-2", "tr-1"]
    traces = mlflow.search_traces(run_id=run1.info.run_id)
    assert _get_names(traces) == ["tr-2", "tr-1"]
    traces = mlflow.search_traces(
        run_id=run2.info.run_id,
        filter_string="tag.fruit = 'apple'",
    )
    assert _get_names(traces) == ["tr-5"]
    # run_id conflicts with an explicit sourceRun metadata filter.
    with pytest.raises(MlflowException, match="You cannot filter by run_id when it is already"):
        mlflow.search_traces(
            run_id=run2.info.run_id,
            filter_string="metadata.mlflow.sourceRun = '123'",
        )
    # The run must belong to one of the searched experiments.
    with pytest.raises(MlflowException, match=f"Run {run1.info.run_id} belongs to"):
        mlflow.search_traces(run_id=run1.info.run_id, experiment_ids=["1"])
@pytest.mark.parametrize(
    "extract_fields",
    [
        ["span.llm.inputs"],
        ["span.llm.inputs.x"],
        ["span.llm.outputs"],
    ],
)
@skip_when_testing_trace_sdk
def test_search_traces_invalid_extract_fields(extract_fields):
    """Malformed extract_fields specs must raise an 'Invalid field type' error."""
    with pytest.raises(MlflowException, match="Invalid field type"):
        mlflow.search_traces(extract_fields=extract_fields)
def test_get_last_active_trace_id():
    """get_last_active_trace_id returns the most recent trace; the fetched
    Trace is a copy whose mutation does not leak into the backend."""
    assert mlflow.get_last_active_trace_id() is None

    @mlflow.trace()
    def predict(x, y):
        return x + y

    for args in [(1, 2), (2, 5), (3, 6)]:
        predict(*args)

    last_trace_id = mlflow.get_last_active_trace_id()
    last_trace = mlflow.get_trace(last_trace_id)
    assert last_trace.info.trace_id is not None
    assert last_trace.data.request == '{"x": 3, "y": 6}'

    # Mutation of the copy should not affect the original trace logged in the backend
    last_trace.info.state = TraceState.ERROR
    original_trace = mlflow.get_trace(last_trace.info.trace_id)
    assert original_trace.info.state == TraceState.OK
def test_get_last_active_trace_thread_local():
    """With thread_local=True, each thread sees only the last trace it created."""
    assert mlflow.get_last_active_trace_id() is None
    def run(id):
        @mlflow.trace(name=f"predict_{id}")
        def predict(x, y):
            return x + y
        predict(1, 2)
        # Read the last trace id recorded by *this* worker thread only.
        return mlflow.get_last_active_trace_id(thread_local=True)
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(run, i) for i in range(10)]
        trace_ids = [future.result() for future in futures]
    assert len(trace_ids) == 10
    # Each returned id must belong to the trace created by the matching task.
    for i, trace_id in enumerate(trace_ids):
        trace = mlflow.get_trace(trace_id)
        assert trace.info.state == TraceState.OK
        assert trace.data.spans[0].name == f"predict_{i}"
def test_trace_with_classmethod():
    """@mlflow.trace stacked above @classmethod captures inputs and outputs."""
    class TestModel:
        @mlflow.trace
        @classmethod
        def predict(cls, x, y):
            return x + y

    assert TestModel.predict(1, 2) == 3

    last_trace_id = mlflow.get_last_active_trace_id()
    assert last_trace_id is not None
    trace = mlflow.get_trace(last_trace_id)
    assert trace is not None
    assert len(trace.data.spans) > 0

    # The root span corresponds to the traced classmethod.
    root_span = trace.data.spans[0]
    assert root_span.name == "predict"
    assert root_span.inputs == {"x": 1, "y": 2}
    assert root_span.outputs == 3
def test_trace_with_classmethod_order_reversed():
    """@classmethod stacked above @mlflow.trace also captures inputs/outputs."""
    class TestModel:
        @classmethod
        @mlflow.trace
        def predict(cls, x, y):
            return x + y

    assert TestModel.predict(1, 2) == 3

    last_trace_id = mlflow.get_last_active_trace_id()
    assert last_trace_id is not None
    trace = mlflow.get_trace(last_trace_id)
    assert trace is not None
    assert len(trace.data.spans) > 0

    # The root span corresponds to the traced classmethod.
    root_span = trace.data.spans[0]
    assert root_span.name == "predict"
    assert root_span.inputs == {"x": 1, "y": 2}
    assert root_span.outputs == 3
def test_trace_with_staticmethod():
    """@mlflow.trace stacked above @staticmethod captures inputs and outputs."""
    class TestModel:
        @mlflow.trace
        @staticmethod
        def predict(x, y):
            return x + y

    assert TestModel.predict(1, 2) == 3

    last_trace_id = mlflow.get_last_active_trace_id()
    assert last_trace_id is not None
    trace = mlflow.get_trace(last_trace_id)
    assert trace is not None
    assert len(trace.data.spans) > 0

    # The root span corresponds to the traced staticmethod.
    root_span = trace.data.spans[0]
    assert root_span.name == "predict"
    assert root_span.inputs == {"x": 1, "y": 2}
    assert root_span.outputs == 3
def test_trace_with_staticmethod_order_reversed():
    """@staticmethod stacked above @mlflow.trace also captures inputs/outputs."""
    class TestModel:
        @staticmethod
        @mlflow.trace
        def predict(x, y):
            return x + y

    assert TestModel.predict(1, 2) == 3

    last_trace_id = mlflow.get_last_active_trace_id()
    assert last_trace_id is not None
    trace = mlflow.get_trace(last_trace_id)
    assert trace is not None
    assert len(trace.data.spans) > 0

    # The root span corresponds to the traced staticmethod.
    root_span = trace.data.spans[0]
    assert root_span.name == "predict"
    assert root_span.inputs == {"x": 1, "y": 2}
    assert root_span.outputs == 3
def test_update_current_trace():
    """update_current_trace merges tags set anywhere in the span hierarchy into
    the single enclosing trace, with later calls overriding earlier keys."""
    @mlflow.trace(name="root_function")
    def f(x):
        mlflow.update_current_trace(tags={"fruit": "apple", "animal": "dog"})
        return g(x) + 1
    @mlflow.trace(name="level_1_function")
    def g(y):
        with mlflow.start_span(name="level_2_span"):
            # Overrides the "fruit" tag set earlier by the root function.
            mlflow.update_current_trace(tags={"fruit": "orange", "vegetable": "carrot"})
            return h(y) * 2
    @mlflow.trace(name="level_3_function")
    def h(z):
        with mlflow.start_span(name="level_4_span"):
            with mlflow.start_span(name="level_5_span"):
                mlflow.update_current_trace(tags={"depth": "deep", "level": "5"})
                return z + 10
    f(1)
    expected_tags = {
        "animal": "dog",
        "fruit": "orange",
        "vegetable": "carrot",
        "depth": "deep",
        "level": "5",
    }
    # Validate in-memory trace
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    assert trace.info.state == TraceState.OK
    tags = {k: v for k, v in trace.info.tags.items() if not k.startswith("mlflow.")}
    assert tags == expected_tags
    # Validate backend trace
    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.state == TraceState.OK
    tags = {k: v for k, v in traces[0].info.tags.items() if not k.startswith("mlflow.")}
    assert tags == expected_tags
    # Verify trace can be searched by span names (only when database backend is available)
    if not IS_TRACING_SDK_ONLY:
        trace_by_root_span = mlflow.search_traces(
            filter_string='span.name = "root_function"', return_type="list"
        )
        assert len(trace_by_root_span) == 1
        trace_by_level_2_span = mlflow.search_traces(
            filter_string='span.name = "level_2_span"', return_type="list"
        )
        assert len(trace_by_level_2_span) == 1
        trace_by_level_5_span = mlflow.search_traces(
            filter_string='span.name = "level_5_span"', return_type="list"
        )
        assert len(trace_by_level_5_span) == 1
        # All searches should return the same trace
        assert trace_by_root_span[0].info.request_id == trace.info.request_id
        assert trace_by_level_2_span[0].info.request_id == trace.info.request_id
        assert trace_by_level_5_span[0].info.request_id == trace.info.request_id
def test_update_current_trace_with_client_request_id():
    """Test that update_current_trace correctly handles client_request_id parameter."""
    from mlflow.tracing.trace_manager import InMemoryTraceManager
    # Test updating during span execution
    with mlflow.start_span("test_span") as span:
        # Update with both tags and client_request_id
        mlflow.update_current_trace(tags={"operation": "test"}, client_request_id="req-12345")
        # Check in-memory trace during execution
        trace_manager = InMemoryTraceManager.get_instance()
        with trace_manager.get_trace(span.trace_id) as trace:
            assert trace.info.client_request_id == "req-12345"
            tags = {k: v for k, v in trace.info.tags.items() if not k.startswith("mlflow.")}
            assert tags["operation"] == "test"
    # Test with tags only
    with mlflow.start_span("test_span_2") as span:
        mlflow.update_current_trace(tags={"operation": "tags_only"})
        trace_manager = InMemoryTraceManager.get_instance()
        with trace_manager.get_trace(span.trace_id) as trace:
            # client_request_id stays unset when only tags are updated.
            assert trace.info.client_request_id is None
            tags = {k: v for k, v in trace.info.tags.items() if not k.startswith("mlflow.")}
            assert tags["operation"] == "tags_only"
    # Test with client_request_id only
    with mlflow.start_span("test_span_3") as span:
        mlflow.update_current_trace(client_request_id="req-67890")
        trace_manager = InMemoryTraceManager.get_instance()
        with trace_manager.get_trace(span.trace_id) as trace:
            assert trace.info.client_request_id == "req-67890"
def test_update_current_trace_client_request_id_overwrites():
    """A later client_request_id replaces the one set earlier on the same trace."""
    from mlflow.tracing.trace_manager import InMemoryTraceManager

    with mlflow.start_span("overwrite_test") as span:
        mlflow.update_current_trace(client_request_id="req-initial")
        mlflow.update_current_trace(client_request_id="req-updated")

        manager = InMemoryTraceManager.get_instance()
        with manager.get_trace(span.trace_id) as trace:
            # The latest value wins.
            assert trace.info.client_request_id == "req-updated"
def test_update_current_trace_client_request_id_stringification():
    """Non-string client_request_id values are coerced to str; None is a no-op."""
    from mlflow.tracing.trace_manager import InMemoryTraceManager

    cases = [
        (123, "123"),
        (45.67, "45.67"),
        (True, "True"),
        (False, "False"),
        (None, None),  # None should remain None
        (["list", "value"], "['list', 'value']"),
        ({"dict": "value"}, "{'dict': 'value'}"),
    ]
    for raw, expected in cases:
        with mlflow.start_span(f"stringification_test_{raw}") as span:
            mlflow.update_current_trace(client_request_id=raw)
            manager = InMemoryTraceManager.get_instance()
            with manager.get_trace(span.trace_id) as trace:
                if expected is None:
                    # None must not set (or stringify) the client_request_id.
                    assert trace.info.client_request_id is None
                else:
                    assert trace.info.client_request_id == expected
                    assert isinstance(trace.info.client_request_id, str)
def test_update_current_trace_with_metadata():
    """Test that update_current_trace correctly handles metadata parameter."""
    @mlflow.trace
    def f():
        mlflow.update_current_trace(
            metadata={
                "mlflow.source.name": "inference.py",
                "mlflow.source.git.commit": "1234567890",
                "mlflow.source.git.repoURL": "https://github.com/mlflow/mlflow",
                "non-string-metadata": 123,
            },
        )
    f()
    expected_metadata = {
        "mlflow.source.name": "inference.py",
        "mlflow.source.git.commit": "1234567890",
        "mlflow.source.git.repoURL": "https://github.com/mlflow/mlflow",
        "non-string-metadata": "123",  # Should be stringified
    }
    # Validate in-memory trace
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    for k, v in expected_metadata.items():
        assert trace.info.trace_metadata[k] == v
    # Validate backend trace
    traces = get_traces()
    assert len(traces) == 1
    # Consistency: use `state`/TraceState like the rest of this module
    # (`info.status` is the legacy string-based accessor).
    assert traces[0].info.state == TraceState.OK
    for k, v in expected_metadata.items():
        assert traces[0].info.trace_metadata[k] == v
@skip_when_testing_trace_sdk
def test_update_current_trace_with_model_id():
    """update_current_trace(model_id=...) lands in the trace metadata."""
    with mlflow.start_span("test_span"):
        mlflow.update_current_trace(model_id="model-123")
    recorded = get_traces()[0]
    assert recorded.info.trace_metadata[TraceMetadataKey.MODEL_ID] == "model-123"
@skip_when_testing_trace_sdk
def test_update_current_trace_should_not_raise_during_model_logging():
    """
    Tracing is disabled while model logging. When the model includes
    `update_current_trace` call, it should be no-op.
    """
    class MyModel(mlflow.pyfunc.PythonModel):
        @mlflow.trace
        def predict(self, model_inputs):
            mlflow.update_current_trace(tags={"fruit": "apple"})
            return [model_inputs[0] + 1]
    # A direct call produces a tagged trace as usual.
    model = MyModel()
    model.predict([1])
    trace = get_traces()[0]
    # Consistency: compare against TraceState like the rest of this module,
    # not raw "OK" strings / the legacy `status` accessor.
    assert trace.info.state == TraceState.OK
    assert trace.info.tags["fruit"] == "apple"
    purge_traces()
    model_info = mlflow.pyfunc.log_model(
        python_model=model,
        name="model",
        input_example=[0],
    )
    # Trace should not be generated while logging the model
    assert get_traces() == []
    # Signature should be inferred properly without raising any exception
    assert model_info.signature is not None
    assert model_info.signature.inputs is not None
    assert model_info.signature.outputs is not None
    # Loading back the model
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    loaded_model.predict([1])
    trace = get_traces()[0]
    assert trace.info.state == TraceState.OK
    assert trace.info.tags["fruit"] == "apple"
def test_update_current_trace_with_state():
    """Test the state parameter in update_current_trace."""
    from mlflow.tracing.trace_manager import InMemoryTraceManager
    # Test with TraceState enum
    with mlflow.start_span("test_span") as span:
        mlflow.update_current_trace(state=TraceState.ERROR)
        trace_manager = InMemoryTraceManager.get_instance()
        with trace_manager.get_trace(span.trace_id) as trace:
            assert trace.info.state == TraceState.ERROR
    # Test with string state
    with mlflow.start_span("test_span_2") as span:
        mlflow.update_current_trace(state="OK")
        trace_manager = InMemoryTraceManager.get_instance()
        with trace_manager.get_trace(span.trace_id) as trace:
            # The plain string is normalized to the TraceState enum.
            assert trace.info.state == TraceState.OK
    # Test with combined parameters
    with mlflow.start_span("test_span_3") as span:
        mlflow.update_current_trace(
            state="ERROR", tags={"error_type": "validation"}, client_request_id="req-123"
        )
        trace_manager = InMemoryTraceManager.get_instance()
        with trace_manager.get_trace(span.trace_id) as trace:
            # All three updates are applied in one call.
            assert trace.info.state == TraceState.ERROR
            assert trace.info.tags["error_type"] == "validation"
            assert trace.info.client_request_id == "req-123"
def test_update_current_trace_state_none():
    """state=None must leave the trace state untouched while other fields update."""
    from mlflow.tracing.trace_manager import InMemoryTraceManager

    with mlflow.start_span("test_span") as span:
        # Establish a known state first.
        mlflow.update_current_trace(state="OK")
        mlflow.update_current_trace(state=None, tags={"test": "value"})

        manager = InMemoryTraceManager.get_instance()
        with manager.get_trace(span.trace_id) as trace:
            assert trace.info.state == TraceState.OK
            assert trace.info.tags["test"] == "value"
def test_update_current_trace_state_validation():
    """Only 'OK'/'ERROR' (as string or TraceState) are accepted trace states."""
    with mlflow.start_span("test_span"):
        # All four valid spellings should be accepted without error.
        for valid_state in ("OK", "ERROR", TraceState.OK, TraceState.ERROR):
            mlflow.update_current_trace(state=valid_state)

        # Everything else is rejected with a message naming the offending value.
        invalid_cases = [
            ("IN_PROGRESS", "IN_PROGRESS"),
            (TraceState.STATE_UNSPECIFIED, "STATE_UNSPECIFIED"),
            ("CUSTOM_STATE", "CUSTOM_STATE"),
            (123, "123"),  # wrong type, not just wrong value
        ]
        for invalid_state, shown in invalid_cases:
            with pytest.raises(
                MlflowException,
                match=rf"State must be either 'OK' or 'ERROR', but got '{shown}'",
            ):
                mlflow.update_current_trace(state=invalid_state)
def test_span_record_exception_with_string():
    """record_exception with a plain string marks the span ERROR and logs an event."""
    with mlflow.start_span("test_span") as span:
        span.record_exception("Something went wrong")

    # Inspect the persisted trace.
    persisted = get_traces()[0]
    recorded_span = persisted.data.spans[0]

    # The span must be flipped to ERROR status.
    assert recorded_span.status.status_code == SpanStatusCode.ERROR

    # Exactly one exception event is attached, carrying the message.
    events = [e for e in recorded_span.events if "exception" in e.name.lower()]
    assert len(events) == 1
    assert "Something went wrong" in str(events[0].attributes)
def test_span_record_exception_with_exception():
    """record_exception with an Exception records type and message on the span."""
    error = ValueError("Custom error message")
    with mlflow.start_span("test_span") as span:
        span.record_exception(error)

    persisted = get_traces()[0]
    recorded_span = persisted.data.spans[0]
    assert recorded_span.status.status_code == SpanStatusCode.ERROR

    # The exception event carries both the class name and the message.
    events = [e for e in recorded_span.events if "exception" in e.name.lower()]
    assert len(events) == 1
    attrs = str(events[0].attributes)
    assert "ValueError" in attrs
    assert "Custom error message" in attrs
def test_span_record_exception_invalid_type():
    """record_exception rejects values that are neither Exception nor str."""
    with mlflow.start_span("test_span") as span:
        expected_msg = "The `exception` parameter must be an Exception instance or a string"
        with pytest.raises(MlflowException, match=expected_msg):
            span.record_exception(123)
def test_combined_state_and_record_exception():
    """Test using both status update and record_exception together."""
    @mlflow.trace
    def test_function():
        # Get current span and record exception
        span = mlflow.get_current_active_span()
        span.record_exception("Processing failed")
        # Update trace state independently
        mlflow.update_current_trace(state="ERROR", tags={"error_source": "processing"})
        return "result"
    test_function()
    # Check the trace
    trace = get_traces()[0]
    # Verify trace state was set to ERROR
    assert trace.info.state == TraceState.ERROR
    assert trace.info.tags["error_source"] == "processing"
    # Verify span has exception event and ERROR state
    spans = trace.data.spans
    root_span = spans[0]
    assert root_span.status.status_code == SpanStatusCode.ERROR
    # Exactly one exception event carrying the recorded message.
    exception_events = [event for event in root_span.events if "exception" in event.name.lower()]
    assert len(exception_events) == 1
    assert "Processing failed" in str(exception_events[0].attributes)
def test_span_record_exception_no_op_span():
    """Test that record_exception works gracefully with NoOpSpan."""
    from mlflow.entities.span import NoOpSpan

    # Recording on a no-op span must neither raise nor emit a trace.
    NoOpSpan().record_exception("This should be ignored")
    assert get_traces() == []
def test_update_current_trace_state_isolation():
    """Trace-level state updates must not touch the span's own status."""
    with mlflow.start_span("test_span") as span:
        span.set_status("OK")
        # Flip only the trace-level state.
        mlflow.update_current_trace(state="ERROR")
        # The live span's status is untouched.
        assert span.status.status_code == SpanStatusCode.OK

    persisted = get_traces()[0]
    assert persisted.info.state == TraceState.ERROR

    # The persisted span kept its OK status despite the ERROR trace state.
    assert persisted.data.spans[0].status.status_code == SpanStatusCode.OK
@skip_when_testing_trace_sdk
def test_non_ascii_characters_not_encoded_as_unicode():
    """Non-ASCII span inputs must round-trip without lossy escaping."""
    payload = {"japanese": "あ", "emoji": "👍"}
    with mlflow.start_span() as span:
        span.set_inputs(payload)
    recorded = mlflow.get_trace(span.trace_id).data.spans[0]
    assert recorded.inputs == {"japanese": "あ", "emoji": "👍"}
# A serialized trace as returned by a remote MLflow service: two spans (root
# "remote" and its child "remote-child"), v2 trace-schema metadata, and two
# user tags. Consumed by the add_trace tests in this module.
_SAMPLE_REMOTE_TRACE = {
    "info": {
        "request_id": "2e72d64369624e6888324462b62dc120",
        "experiment_id": "0",
        "timestamp_ms": 1726145090860,
        "execution_time_ms": 162,
        "status": "OK",
        "request_metadata": {
            "mlflow.trace_schema.version": "2",
            "mlflow.traceInputs": '{"x": 1}',
            "mlflow.traceOutputs": '{"prediction": 1}',
        },
        "tags": {
            "fruit": "apple",
            "food": "pizza",
        },
    },
    "data": {
        "spans": [
            # Root span of the remote trace (parent_id is None).
            {
                "name": "remote",
                "context": {
                    "span_id": "0x337af925d6629c01",
                    "trace_id": "0x05e82d1fc4486f3986fae6dd7b5352b1",
                },
                "parent_id": None,
                "start_time": 1726145091022155863,
                "end_time": 1726145091022572053,
                "status_code": "OK",
                "status_message": "",
                "attributes": {
                    "mlflow.traceRequestId": '"2e72d64369624e6888324462b62dc120"',
                    "mlflow.spanType": '"UNKNOWN"',
                    "mlflow.spanInputs": '{"x": 1}',
                    "mlflow.spanOutputs": '{"prediction": 1}',
                },
                "events": [
                    {"name": "event", "timestamp": 1726145091022287, "attributes": {"foo": "bar"}}
                ],
            },
            # Child span, parented on the root span above.
            {
                "name": "remote-child",
                "context": {
                    "span_id": "0xa3dde9f2ebac1936",
                    "trace_id": "0x05e82d1fc4486f3986fae6dd7b5352b1",
                },
                "parent_id": "0x337af925d6629c01",
                "start_time": 1726145091022419340,
                "end_time": 1726145091022497944,
                "status_code": "OK",
                "status_message": "",
                "attributes": {
                    "mlflow.traceRequestId": '"2e72d64369624e6888324462b62dc120"',
                    "mlflow.spanType": '"UNKNOWN"',
                },
                "events": [],
            },
        ],
        "request": '{"x": 1}',
        "response": '{"prediction": 1}',
    },
}
def test_add_trace(mock_otel_trace_start_time):
    """add_trace merges a remote trace's spans under the current active span;
    without the call, the remote trace is simply discarded."""
    # Mimic a remote service call that returns a trace as a part of the response
    def dummy_remote_call():
        return {"prediction": 1, "trace": _SAMPLE_REMOTE_TRACE}
    @mlflow.trace
    def predict(add_trace: bool):
        resp = dummy_remote_call()
        if add_trace:
            mlflow.add_trace(resp["trace"])
        return resp["prediction"]
    # If we don't call add_trace, the trace from the remote service should be discarded
    predict(add_trace=False)
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    assert len(trace.data.spans) == 1
    # If we call add_trace, the trace from the remote service should be merged
    predict(add_trace=True)
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    trace_id = trace.info.trace_id
    assert trace_id is not None
    assert trace.data.request == '{"add_trace": true}'
    assert trace.data.response == "1"
    # Remote spans should be merged
    assert len(trace.data.spans) == 3
    assert all(span.trace_id == trace_id for span in trace.data.spans)
    parent_span, child_span, grandchild_span = trace.data.spans
    # The remote root is re-parented as a child of the local `predict` span.
    assert child_span.parent_id == parent_span.span_id
    assert child_span._trace_id == parent_span._trace_id
    assert grandchild_span.parent_id == child_span.span_id
    assert grandchild_span._trace_id == parent_span._trace_id
    # Check if span information is correctly copied
    rs = Trace.from_dict(_SAMPLE_REMOTE_TRACE).data.spans[0]
    assert child_span.name == rs.name
    assert child_span.start_time_ns == rs.start_time_ns
    assert child_span.end_time_ns == rs.end_time_ns
    assert child_span.status == rs.status
    assert child_span.span_type == rs.span_type
    assert child_span.events == rs.events
    # exclude request ID attribute from comparison
    for k in rs.attributes.keys() - {SpanAttributeKey.REQUEST_ID}:
        assert child_span.attributes[k] == rs.attributes[k]
def test_add_trace_no_current_active_trace():
    """With no active trace, add_trace wraps the remote spans in a synthetic
    "Remote Trace <...>" root span."""
    # Use the remote trace without any active trace
    remote_trace = Trace.from_dict(_SAMPLE_REMOTE_TRACE)
    mlflow.add_trace(remote_trace)
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    assert len(trace.data.spans) == 3
    parent_span, child_span, grandchild_span = trace.data.spans
    assert parent_span.name == "Remote Trace <remote>"
    rs = remote_trace.data.spans[0]
    # The synthetic root is expected to start 1 ns before the remote root span.
    assert parent_span.start_time_ns == rs.start_time_ns - 1
    assert parent_span.end_time_ns == rs.end_time_ns
    assert child_span.name == rs.name
    # Fix: compare span IDs by equality; `is` on strings relies on interning.
    assert child_span.parent_id == parent_span.span_id
    assert child_span.start_time_ns == rs.start_time_ns
    assert child_span.end_time_ns == rs.end_time_ns
    assert child_span.status == rs.status
    assert child_span.span_type == rs.span_type
    assert child_span.events == rs.events
    assert grandchild_span.parent_id == child_span.span_id
    # exclude request ID attribute from comparison
    for k in rs.attributes.keys() - {SpanAttributeKey.REQUEST_ID}:
        assert child_span.attributes[k] == rs.attributes[k]
def test_add_trace_specific_target_span(mock_otel_trace_start_time):
    """add_trace(target=...) grafts the remote spans under the given span."""
    span = start_span_no_context(name="parent")
    mlflow.add_trace(_SAMPLE_REMOTE_TRACE, target=span)
    span.end()
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    assert len(trace.data.spans) == 3
    parent_span, child_span, grandchild_span = trace.data.spans
    assert parent_span.span_id == span.span_id
    rs = Trace.from_dict(_SAMPLE_REMOTE_TRACE).data.spans[0]
    assert child_span.name == rs.name
    # Fix: compare span IDs by equality; `is` on strings relies on interning.
    assert child_span.parent_id == parent_span.span_id
    assert grandchild_span.parent_id == child_span.span_id
def test_add_trace_merge_tags():
    """Remote-trace tags merge in, with the parent trace winning key conflicts."""
    client = TracingClient()

    # Start the parent trace and merge the above trace as a child
    with mlflow.start_span(name="parent") as span:
        client.set_trace_tag(span.trace_id, "vegetable", "carrot")
        client.set_trace_tag(span.trace_id, "food", "sushi")
        mlflow.add_trace(Trace.from_dict(_SAMPLE_REMOTE_TRACE))

    merged = mlflow.get_trace(mlflow.get_last_active_trace_id())
    user_tags = {k: v for k, v in merged.info.tags.items() if not k.startswith("mlflow.")}
    assert user_tags == {
        "fruit": "apple",
        "vegetable": "carrot",
        # Tag value from the parent trace should prevail
        "food": "sushi",
    }
def test_add_trace_raise_for_invalid_trace():
    """add_trace validates its argument: type, parsability, state, span order."""
    # Not a trace at all.
    with pytest.raises(MlflowException, match="Invalid trace object"):
        mlflow.add_trace(None)

    # A dict that cannot be deserialized into a Trace.
    with pytest.raises(MlflowException, match="Failed to load a trace object"):
        mlflow.add_trace({"info": {}, "data": {}})

    # A trace that has not finished yet.
    in_progress_trace = Trace(
        info=TraceInfo(
            trace_id="123",
            trace_location=TraceLocation.from_experiment_id("0"),
            request_time=0,
            execution_duration=0,
            state=TraceState.IN_PROGRESS,
        ),
        data=TraceData(),
    )
    with pytest.raises(MlflowException, match="The trace must be ended"):
        mlflow.add_trace(in_progress_trace)

    # A trace whose child span precedes its parent.
    sample = Trace.from_dict(_SAMPLE_REMOTE_TRACE)
    shuffled = Trace(info=sample.info, data=TraceData(spans=sample.data.spans[::-1]))
    with pytest.raises(MlflowException, match="Span with ID "):
        mlflow.add_trace(shuffled)
@skip_when_testing_trace_sdk
def test_add_trace_in_databricks_model_serving(mock_databricks_serving_with_tracing_env):
    """Under a serving prediction context, the merged trace is keyed by the
    serving request id and retrievable via pop_trace."""
    from mlflow.pyfunc.context import Context, set_prediction_context
    # Mimic a remote service call that returns a trace as a part of the response
    def dummy_remote_call():
        return {"prediction": 1, "trace": _SAMPLE_REMOTE_TRACE}
    # The parent function that invokes the dummy remote service
    @mlflow.trace
    def predict():
        resp = dummy_remote_call()
        remote_trace = Trace.from_dict(resp["trace"])
        mlflow.add_trace(remote_trace)
        return resp["prediction"]
    db_request_id = "databricks-request-id"
    with set_prediction_context(Context(request_id=db_request_id)):
        predict()
    # Pop the trace to be written to the inference table
    trace = Trace.from_dict(pop_trace(request_id=db_request_id))
    assert trace.info.trace_id.startswith("tr-")
    assert trace.info.client_request_id == db_request_id
    assert len(trace.data.spans) == 3
    assert all(span.trace_id == trace.info.trace_id for span in trace.data.spans)
    parent_span, child_span, grandchild_span = trace.data.spans
    # Remote spans are re-parented under the local root with a rewritten trace id.
    assert child_span.parent_id == parent_span.span_id
    assert child_span._trace_id == parent_span._trace_id
    assert grandchild_span.parent_id == child_span.span_id
    assert grandchild_span._trace_id == parent_span._trace_id
    # Check if span information is correctly copied
    rs = Trace.from_dict(_SAMPLE_REMOTE_TRACE).data.spans[0]
    assert child_span.name == rs.name
    assert child_span.start_time_ns == rs.start_time_ns
    assert child_span.end_time_ns == rs.end_time_ns
@skip_when_testing_trace_sdk
def test_add_trace_logging_model_from_code():
    """Models-from-code that call add_trace emit no traces at log/load time."""
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model",
            python_model="tests/tracing/sample_code/model_with_add_trace.py",
            input_example=[1, 2],
        )
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)

    # Trace should not be logged while logging / loading
    assert mlflow.get_trace(mlflow.get_last_active_trace_id()) is None

    # Prediction, however, produces a merged two-span trace.
    loaded_model.predict(1)
    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())
    assert trace is not None
    assert len(trace.data.spans) == 2
@pytest.mark.parametrize(
    "inputs", [{"question": "Does mlflow support tracing?"}, "Does mlflow support tracing?", None]
)
@pytest.mark.parametrize("outputs", [{"answer": "Yes"}, "Yes", None])
@pytest.mark.parametrize(
    "intermediate_outputs",
    [
        {
            "retrieved_documents": ["mlflow documentation"],
            "system_prompt": ["answer the question with yes or no"],
        },
        None,
    ],
)
def test_log_trace_success(inputs, outputs, intermediate_outputs):
    """log_trace records a single-span trace with the given payloads and timing."""
    start_time_ms = 1736144700
    execution_time_ms = 5129
    mlflow.log_trace(
        name="test",
        request=inputs,
        response=outputs,
        intermediate_outputs=intermediate_outputs,
        start_time_ms=start_time_ms,
        execution_time_ms=execution_time_ms,
    )

    trace = mlflow.get_trace(mlflow.get_last_active_trace_id())

    # request/response are serialized to JSON when provided, left None otherwise.
    expected_request = None if inputs is None else json.dumps(inputs)
    assert trace.data.request == expected_request
    expected_response = None if outputs is None else json.dumps(outputs)
    assert trace.data.response == expected_response
    if intermediate_outputs is not None:
        assert trace.data.intermediate_outputs == intermediate_outputs

    # A single root span carries the name and the ms -> ns converted timestamps.
    spans = trace.data.spans
    assert len(spans) == 1
    root_span = spans[0]
    assert root_span.name == "test"
    assert root_span.start_time_ns == start_time_ms * 1000000
    assert root_span.end_time_ns == (start_time_ms + execution_time_ms) * 1000000
def test_set_delete_trace_tag():
with mlflow.start_span("span1") as span:
trace_id = span.trace_id
mlflow.set_trace_tag(trace_id=trace_id, key="key1", value="value1")
trace = mlflow.get_trace(trace_id=trace_id)
assert trace.info.tags["key1"] == "value1"
mlflow.delete_trace_tag(trace_id=trace_id, key="key1")
trace = mlflow.get_trace(trace_id=trace_id)
assert "key1" not in trace.info.tags
# Test with request_id kwarg (backward compatibility)
mlflow.set_trace_tag(request_id=trace_id, key="key3", value="value3")
trace = mlflow.get_trace(request_id=trace_id)
assert trace.info.tags["key3"] == "value3"
mlflow.delete_trace_tag(request_id=trace_id, key="key3")
trace = mlflow.get_trace(request_id=trace_id)
assert "key3" not in trace.info.tags
@pytest.mark.parametrize("is_databricks", [True, False])
def test_search_traces_with_run_id_validates_store_filter_string(is_databricks):
mock_store = mock.MagicMock()
mock_store.search_traces.return_value = ([], None)
mock_store.get_run.return_value = mock.MagicMock()
mock_store.get_run.return_value.info.experiment_id = "test_exp_id"
test_run_id = "test_run_123"
with (
mock.patch("mlflow.tracing.client._get_store", return_value=mock_store),
mock.patch("mlflow.tracking.fluent._get_experiment_id", return_value="test_exp_id"),
):
mlflow.search_traces(run_id=test_run_id)
expected_filter_string = f"attribute.run_id = '{test_run_id}'"
mock_store.search_traces.assert_called()
call_args = mock_store.search_traces.call_args
actual_filter_string = call_args[1]["filter_string"]
assert actual_filter_string == expected_filter_string
def test_search_traces_with_locations(mock_client):
mock_client.search_traces.return_value = PagedList([], token=None)
# Test with locations
mlflow.search_traces(locations=["catalog1.schema1", "catalog2.schema2"])
# Verify that search_traces was called with locations
mock_client.search_traces.assert_called_once()
call_kwargs = mock_client.search_traces.call_args.kwargs
assert call_kwargs["locations"] == ["catalog1.schema1", "catalog2.schema2"]
assert call_kwargs.get("experiment_ids") is None
def test_search_traces_experiment_ids_deprecation_warning(mock_client):
mock_client.search_traces.return_value = PagedList([], token=None)
# Test that using experiment_ids shows a deprecation warning
with pytest.warns(FutureWarning, match="experiment_ids.*deprecated.*use.*locations"):
mlflow.search_traces(experiment_ids=["123"])
# Verify that search_traces was called and experiment_ids was converted to locations
mock_client.search_traces.assert_called_once()
call_kwargs = mock_client.search_traces.call_args.kwargs
assert call_kwargs["locations"] == ["123"]
assert call_kwargs["experiment_ids"] is None
def test_search_traces_with_sql_warehouse_id(mock_client):
mock_client.search_traces.return_value = PagedList([], token=None)
# Test with sql_warehouse_id
mlflow.search_traces(locations=["123"], sql_warehouse_id="warehouse456")
# Verify that search_traces was called with sql_warehouse_id
mock_client.search_traces.assert_called_once()
call_kwargs = mock_client.search_traces.call_args.kwargs
assert call_kwargs["locations"] == ["123"]
assert "sql_warehouse_id" not in call_kwargs
assert os.environ["MLFLOW_TRACING_SQL_WAREHOUSE_ID"] == "warehouse456"
@skip_when_testing_trace_sdk
def test_set_destination_in_threads(async_logging_enabled):
# This test makes sure `set_destination` obeys thread-local behavior.
class TestModel:
def predict(self, x):
with mlflow.start_span(name="root_span") as root_span:
def child_span_thread(z):
child_span = start_span_no_context(
name="child_span_1",
parent_span=root_span,
)
child_span.set_inputs(z)
time.sleep(0.5)
child_span.end()
thread = threading.Thread(target=child_span_thread, args=(x + 1,))
thread.start()
thread.join()
return x
model = TestModel()
def func(experiment_id: str | None, x: int):
if experiment_id is not None:
set_destination(MlflowExperiment(experiment_id), context_local=True)
time.sleep(0.5)
model.predict(x)
# Main thread: global config
experiment_id1 = mlflow.create_experiment(uuid.uuid4().hex)
set_destination(MlflowExperiment(experiment_id1))
func(None, 3)
# Thread 1: context-local config
experiment_id2 = mlflow.create_experiment(uuid.uuid4().hex)
thread1 = threading.Thread(target=func, args=(experiment_id2, 3))
# Thread 2: context-local config
experiment_id3 = mlflow.create_experiment(uuid.uuid4().hex)
thread2 = threading.Thread(target=func, args=(experiment_id3, 40))
# Thread 3: no config -> fallback to global config
thread3 = threading.Thread(target=func, args=(None, 40))
thread1.start()
thread2.start()
thread3.start()
thread1.join()
thread2.join()
thread3.join()
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
traces = get_traces(experiment_id1)
assert len(traces) == 2 # main thread + thread 3
assert traces[0].info.experiment_id == experiment_id1
assert len(traces[0].data.spans) == 2
assert traces[1].info.experiment_id == experiment_id1
assert len(traces[1].data.spans) == 2
for exp_id in [experiment_id2, experiment_id3]:
traces = get_traces(exp_id)
assert len(traces) == 1
assert traces[0].info.experiment_id == exp_id
assert len(traces[0].data.spans) == 2
@pytest.mark.asyncio
@skip_when_testing_trace_sdk
async def test_set_destination_in_async_contexts(async_logging_enabled):
class TestModel:
async def predict(self, x):
with mlflow.start_span(name="root_span") as root_span:
async def child_span_task(z):
child_span = start_span_no_context(
name="child_span_1",
parent_span=root_span,
)
child_span.set_inputs(z)
await asyncio.sleep(0.5)
child_span.end()
await child_span_task(x + 1)
return x
model = TestModel()
async def async_func(experiment_id: str, x: int):
set_destination(MlflowExperiment(experiment_id), context_local=True)
await asyncio.sleep(0.5)
await model.predict(x)
experiment_id1 = mlflow.create_experiment(uuid.uuid4().hex)
task1 = asyncio.create_task(async_func(experiment_id1, 3))
experiment_id2 = mlflow.create_experiment(uuid.uuid4().hex)
task2 = asyncio.create_task(async_func(experiment_id2, 40))
await asyncio.gather(task1, task2)
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
for exp_id in [experiment_id1, experiment_id2]:
traces = get_traces(exp_id)
assert len(traces) == 1
assert traces[0].info.experiment_id == exp_id
assert len(traces[0].data.spans) == 2
@skip_when_testing_trace_sdk
def test_traces_can_be_searched_by_span_properties(async_logging_enabled):
"""Smoke test that traces can be searched by span name using filter_string."""
@mlflow.trace(name="test_span")
def test_function():
return "result"
test_function()
if async_logging_enabled:
mlflow.flush_trace_async_logging(terminate=True)
traces = mlflow.search_traces(filter_string='span.name = "test_span"', return_type="list")
assert len(traces) == 1, "Should find exactly one trace with span name 'test_span'"
found_span_names = [span.name for span in traces[0].data.spans]
assert "test_span" in found_span_names
@pytest.mark.skipif(
IS_TRACING_SDK_ONLY, reason="Skipping test because mlflow or mlflow-skinny is not installed."
)
def test_search_traces_with_full_text():
with mlflow.start_span(name="test_span") as span:
span.set_attribute("llm.inputs", "How's the result?")
span.set_attribute("llm.outputs", "the number increased 90%")
trace_id_1 = span.trace_id
with mlflow.start_span(name="test_span") as span:
span.set_outputs({"outputs": 1234567})
span.set_attribute("test", "the number increased")
trace_id_2 = span.trace_id
with mlflow.start_span(name="test_span") as span:
span.set_attribute("test", "result including 'single quotes'")
trace_id_3 = span.trace_id
traces = mlflow.search_traces(
filter_string='trace.text LIKE "%How\'s the result?%"', return_type="list"
)
assert len(traces) == 1
assert traces[0].info.trace_id == trace_id_1
traces = mlflow.search_traces(filter_string='trace.text LIKE "%1234567%"', return_type="list")
assert len(traces) == 1
assert traces[0].info.trace_id == trace_id_2
traces = mlflow.search_traces(
filter_string="trace.text LIKE \"%result including 'single quotes'%\"", return_type="list"
)
assert len(traces) == 1
assert traces[0].info.trace_id == trace_id_3
traces = mlflow.search_traces(
filter_string='trace.text LIKE "%increased 90%%"', return_type="list"
)
assert len(traces) == 1
assert traces[0].info.trace_id == trace_id_1
| ErroringStreamTestModel |
python | ray-project__ray | python/ray/llm/_internal/serve/core/ingress/ingress.py | {
"start": 2511,
"end": 11006
} | class ____(Enum):
CHAT = "chat"
COMPLETIONS = "completions"
TRANSCRIPTIONS = "transcriptions"
NON_STREAMING_RESPONSE_TYPES = (
ChatCompletionResponse,
CompletionResponse,
TranscriptionResponse,
)
def _sanitize_chat_completion_request(
request: ChatCompletionRequest,
) -> ChatCompletionRequest:
"""Sanitize ChatCompletionRequest to fix Pydantic ValidatorIterator serialization issue.
This addresses a known Pydantic bug where tool_calls fields become ValidatorIterator
objects that cannot be pickled for Ray remote calls.
References:
- vLLM PR that introduces the workaround: https://github.com/vllm-project/vllm/pull/9951
- Pydantic Issue: https://github.com/pydantic/pydantic/issues/9467
- Related Issue: https://github.com/pydantic/pydantic/issues/9541
- Official Workaround: https://github.com/pydantic/pydantic/issues/9467#issuecomment-2442097291
TODO(seiji): Remove when we update to Pydantic v2.11+ with the fix.
"""
from vllm.transformers_utils.tokenizers.mistral import maybe_serialize_tool_calls
maybe_serialize_tool_calls(request)
return request
StreamResponseType = Union[
ChatCompletionStreamResponse, CompletionStreamResponse, TranscriptionStreamResponse
]
BatchedStreamResponseType = List[StreamResponseType]
DEFAULT_ENDPOINTS = {
"models": lambda app: app.get("/v1/models", response_model=ModelList),
"model_data": lambda app: app.get(
"/v1/models/{model:path}", response_model=ModelCard
),
"completions": lambda app: app.post("/v1/completions"),
"chat": lambda app: app.post("/v1/chat/completions"),
"embeddings": lambda app: app.post("/v1/embeddings"),
"transcriptions": lambda app: app.post(
"/v1/audio/transcriptions",
),
"score": lambda app: app.post("/v1/score"),
}
def init() -> FastAPI:
_fastapi_router_app = FastAPI(lifespan=metrics_lifespan)
# NOTE: PLEASE READ CAREFULLY BEFORE MODIFYING
#
# FastAPI middleware is executed in LIFO (last-in, first-out) order,
# hence maintaining current ordering is crucial as some of the middleware
# might have data dependency on the other: for ex, telemetry middleware
# depends on middleware generating request-id
#
# Add exception handling middleware
# NOTE: This middleware should be added first such that it's intercepting
# exceptions from the handlers, avoiding them propagating to other
# middleware (for ex, telemetry)
add_exception_handling_middleware(_fastapi_router_app)
# Configure CORS middleware
_fastapi_router_app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Add HTTP metrics middleware
add_http_metrics_middleware(_fastapi_router_app)
# Inject unique per-request ID
#
# NOTE: This middleware should be executed among the last (since
# middleware is executed in LIFO).
_fastapi_router_app.add_middleware(SetRequestIdMiddleware)
return _fastapi_router_app
def make_fastapi_ingress(
cls: Type,
*,
endpoint_map: Optional[Dict[str, Callable[[FastAPI], Callable]]] = None,
app: Optional[FastAPI] = None,
):
"""
Create a Ray Serve ingress deployment from a class and endpoint mapping.
Args:
cls: The class to convert into an ingress deployment
endpoint_map: Dictionary mapping method names to FastAPI route
decorators. Each value is a lambda that takes a FastAPI app and
returns a route decorator.
app: Optional FastAPI app to use for the ingress deployment. If not
provided, a new FastAPI app will be created.
Returns:
A class decorated with @serve.ingress
Example:
endpoint_map = {
"increment": lambda app: app.post("/increment"),
"get_counter": lambda app: app.get("/counter"),
}
# With additional FastAPI parameters:
endpoint_map = {
"increment": lambda app: app.post("/increment", status_code=201, tags=["counter"]),
"get_counter": lambda app: app.get("/counter", response_model=CounterResponse),
}
"""
if app is None:
app = init()
if endpoint_map is None:
endpoint_map = DEFAULT_ENDPOINTS
# Create a new class that inherits from the original to avoid modifying it
# in-place. We populate the new class's __dict__ with decorated methods.
class_dict = {}
# Apply route decorators to the class methods and store them in class_dict
for method_name, route_factory in endpoint_map.items():
# Get the route decorator from the lambda
route_decorator = route_factory(app)
# Get the original method from the class
original_method = getattr(cls, method_name)
# Apply the decorator to the original method
decorated_method = route_decorator(original_method)
# Store in the class dict so it will be properly bound to new_cls
class_dict[method_name] = decorated_method
# Create new class with the decorated methods in its __dict__.
# IMPORTANT: We keep the same __name__ and __qualname__ as the original
# class so that make_fastapi_class_based_view can properly identify the routes
# (it checks if cls.__qualname__ is in route.endpoint.__qualname__).
new_cls = type(cls.__name__, (cls,), class_dict)
new_cls.__qualname__ = cls.__qualname__
# Apply the serve.ingress decorator to the new class
return serve.ingress(app)(new_cls)
def _apply_openai_json_format(
response: Union[StreamResponseType, BatchedStreamResponseType],
) -> str:
"""Converts the stream response to OpenAI format.
Each model response is converted to the string:
data: <response-json1>\n\n
The converted strings are concatenated and returned:
data: <response-json1>\n\ndata: <response-json2>\n\n...
"""
if isinstance(response, list):
first_response = next(iter(response))
if isinstance(first_response, str):
return "".join(response)
if isinstance(first_response, dict):
return "".join(f"data: {json.dumps(r)}\n\n" for r in response)
if hasattr(first_response, "model_dump_json"):
return "".join(f"data: {r.model_dump_json()}\n\n" for r in response)
raise ValueError(
f"Unexpected response type: {type(first_response)}, {first_response=}"
)
if hasattr(response, "model_dump_json"):
return f"data: {response.model_dump_json()}\n\n"
if isinstance(response, str):
return response
raise ValueError(f"Unexpected response type: {type(response)}, {response=}")
async def _peek_at_generator(
gen: AsyncGenerator[T, None],
) -> Tuple[T, AsyncGenerator[T, None]]:
# Peek at the first element
first_item = await gen.__anext__()
# Create a new generator that yields the peeked item first
async def new_generator() -> AsyncGenerator[T, None]:
yield first_item
async for item in gen:
yield item
return first_item, new_generator()
async def _openai_json_wrapper(
generator: AsyncGenerator[
Union[StreamResponseType, BatchedStreamResponseType], None
],
) -> AsyncGenerator[str, None]:
"""Wrapper that converts stream responses into OpenAI JSON strings.
Args:
generator: an async generator that yields either individual stream responses
(StreamResponseType) or batches of stream responses (BatchedStreamResponseType).
Each response is converted into OpenAI JSON format and streamed to the client.
For batched responses, the items are concatenated together as a single string.
Yields:
String chunks in OpenAI SSE format: "data: {json}\n\n", with a final
"data: [DONE]\n\n" to indicate completion.
"""
async for response in generator:
packet = _apply_openai_json_format(response)
yield packet
yield "data: [DONE]\n\n"
@asynccontextmanager
async def router_request_timeout(timeout_duration: float):
try:
async with timeout(timeout_duration):
yield
except asyncio.TimeoutError as e:
raise OpenAIHTTPException(
status_code=status.HTTP_408_REQUEST_TIMEOUT,
message="Request server side timeout",
internal_message=str(e),
)
| CallMethod |
python | realpython__materials | python-callable-instances/init_vs_call.py | {
"start": 0,
"end": 273
} | class ____:
def __init__(self, attr):
print(f"Initialize an instance of {self.__class__.__name__}")
self.attr = attr
print(f"{self.attr=}")
def __call__(self, arg):
print(f"Call an instance of {self.__class__.__name__} with {arg}")
| Demo |
python | openai__openai-python | src/openai/types/beta/thread_update_params.py | {
"start": 1481,
"end": 1790
} | class ____(TypedDict, total=False):
vector_store_ids: SequenceNotStr[str]
"""
The
[vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
attached to this thread. There can be a maximum of 1 vector store attached to
the thread.
"""
| ToolResourcesFileSearch |
python | numba__numba | numba/tests/test_doctest.py | {
"start": 74,
"end": 795
} | class ____(TestCase):
def test_basic_decorators(self):
from . import doctest_usecase
# Make sure the finder see all the doctest
finder = doctest.DocTestFinder()
tests = finder.find(doctest_usecase)
testnames = {x.name for x in tests}
expected = {
'numba.tests.doctest_usecase',
'numba.tests.doctest_usecase.a',
'numba.tests.doctest_usecase.b',
'numba.tests.doctest_usecase.c',
'numba.tests.doctest_usecase.d',
}
self.assertEqual(testnames, expected)
# Execute the doctest in the module
doctest.testmod(doctest_usecase)
if __name__ == "__main__":
unittest.main()
| TestDocTest |
python | huggingface__transformers | src/transformers/models/sam2/modeling_sam2.py | {
"start": 22469,
"end": 23053
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
hidden-states at the output of the last layer of the model.
intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the intermediate layers of the model.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
| Sam2HieraDetModelOutput |
python | cython__cython | Cython/Tests/TestCodeWriter.py | {
"start": 41,
"end": 3783
} | class ____(CythonTest):
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
# Note that this test is dependent upon the normal Cython parser
# to generate the input trees to the CodeWriter. This save *a lot*
# of time; better to spend that time writing other tests than perfecting
# this one...
# Whitespace is very significant in this process:
# - always newline on new block (!)
# - indent 4 spaces
# - 1 space around every operator
def t(self, codestr):
self.assertCode(codestr, self.fragment(codestr).root)
def test_print(self):
self.t("""
print(x + y ** 2)
print(x, y, z)
print(x + y, x + y * z, x * (y + z))
""")
def test_if(self):
self.t("if x:\n pass")
def test_ifelifelse(self):
self.t("""
if x:
pass
elif y:
pass
elif z + 34 ** 34 - 2:
pass
else:
pass
""")
def test_def(self):
self.t("""
def f(x, y, z):
pass
def f(x = 34, y = 54, z):
pass
""")
def test_cdef(self):
self.t("""
cdef f(x, y, z):
pass
cdef public void (x = 34, y = 54, z):
pass
cdef f(int *x, void *y, Value *z):
pass
cdef f(int **x, void **y, Value **z):
pass
cdef inline f(int &x, Value &z):
pass
""")
def test_longness_and_signedness(self):
self.t("def f(unsigned long long long long long int y):\n pass")
def test_signed_short(self):
self.t("def f(signed short int y):\n pass")
def test_typed_args(self):
self.t("def f(int x, unsigned long int y):\n pass")
def test_cdef_var(self):
self.t("""
cdef int hello
cdef int hello = 4, x = 3, y, z
""")
def test_for_loop(self):
self.t("""
for x, y, z in f(g(h(34) * 2) + 23):
print(x, y, z)
else:
print(43)
""")
self.t("""
for abc in (1, 2, 3):
print(x, y, z)
else:
print(43)
""")
def test_while_loop(self):
self.t("""
while True:
while True:
while True:
continue
""")
def test_inplace_assignment(self):
self.t("x += 43")
def test_cascaded_assignment(self):
self.t("x = y = z = abc = 43")
def test_attribute(self):
self.t("a.x")
def test_return_none(self):
self.t("""
def f(x, y, z):
return
cdef f(x, y, z):
return
def f(x, y, z):
return None
cdef f(x, y, z):
return None
def f(x, y, z):
return 1234
cdef f(x, y, z):
return 1234
""")
if __name__ == "__main__":
import unittest
unittest.main()
| TestCodeWriter |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 37870,
"end": 38930
} | class ____(Glyph, LineGlyph):
''' Render parabolas.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Quadratic.py"
_args = ("x0", "y0", "x1", "y1", "cx", "cy")
x0 = NumberSpec(default=field("x0"), help="""
The x-coordinates of the starting points.
""")
y0 = NumberSpec(default=field("y0"), help="""
The y-coordinates of the starting points.
""")
x1 = NumberSpec(default=field("x1"), help="""
The x-coordinates of the ending points.
""")
y1 = NumberSpec(default=field("y1"), help="""
The y-coordinates of the ending points.
""")
cx = NumberSpec(default=field("cx"), help="""
The x-coordinates of the control points.
""")
cy = NumberSpec(default=field("cy"), help="""
The y-coordinates of the control points.
""")
line_props = Include(LineProps, help="""
The {prop} values for the parabolas.
""")
| Quadratic |
python | run-llama__llama_index | llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/df.py | {
"start": 2876,
"end": 4755
} | class ____(BasePydanticProgram[DataFrame]):
"""
Data-frame program.
Extracts text into a schema + datapoints.
"""
def __init__(
self,
pydantic_program_cls: Type[BaseLLMFunctionProgram],
df_parser_template_str: str = DEFAULT_FULL_DF_PARSER_TMPL,
input_key: str = "input_str",
**program_kwargs: Any,
) -> None:
"""Init params."""
pydantic_program = pydantic_program_cls.from_defaults(
DataFrame, df_parser_template_str, **program_kwargs
)
self._validate_program(pydantic_program)
self._pydantic_program = pydantic_program
self._input_key = input_key
@classmethod
def from_defaults(
cls,
pydantic_program_cls: Optional[Type[BaseLLMFunctionProgram]] = None,
df_parser_template_str: str = DEFAULT_FULL_DF_PARSER_TMPL,
input_key: str = "input_str",
) -> "DFFullProgram":
"""Full DF output parser."""
pydantic_program_cls = pydantic_program_cls or FunctionCallingProgram
return cls(
pydantic_program_cls,
df_parser_template_str=df_parser_template_str,
input_key=input_key,
)
def _validate_program(self, pydantic_program: BasePydanticProgram) -> None:
if pydantic_program.output_cls != DataFrame:
raise ValueError("Output class of pydantic program must be `DataFrame`.")
@property
def output_cls(self) -> Type[DataFrame]:
"""Output class."""
return DataFrame
def __call__(self, *args: Any, **kwds: Any) -> DataFrame:
"""Call."""
if self._input_key not in kwds:
raise ValueError(f"Input key {self._input_key} not found in kwds.")
result = self._pydantic_program(**{self._input_key: kwds[self._input_key]})
return cast(DataFrame, result)
| DFFullProgram |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial002_py39.py | {
"start": 112,
"end": 430
} | class ____(BaseModel):
model_config = {"extra": "forbid"}
host: str
save_data: bool
if_modified_since: Union[str, None] = None
traceparent: Union[str, None] = None
x_tag: list[str] = []
@app.get("/items/")
async def read_items(headers: CommonHeaders = Header()):
return headers
| CommonHeaders |
python | Textualize__textual | examples/five_by_five.py | {
"start": 978,
"end": 2083
} | class ____(Label):
"""Widget to tell the user they have won."""
MIN_MOVES: Final = 14
"""int: The minimum number of moves you can solve the puzzle in."""
@staticmethod
def _plural(value: int) -> str:
return "" if value == 1 else "s"
def show(self, moves: int) -> None:
"""Show the winner message.
Args:
moves (int): The number of moves required to win.
"""
self.update(
"W I N N E R !\n\n\n"
f"You solved the puzzle in {moves} move{self._plural(moves)}."
+ (
(
f" It is possible to solve the puzzle in {self.MIN_MOVES}, "
f"you were {moves - self.MIN_MOVES} move{self._plural(moves - self.MIN_MOVES)} over."
)
if moves > self.MIN_MOVES
else " Well done! That's the minimum number of moves to solve the puzzle!"
)
)
self.add_class("visible")
def hide(self) -> None:
"""Hide the winner message."""
self.remove_class("visible")
| WinnerMessage |
python | scipy__scipy | scipy/sparse/linalg/_eigen/arpack/arpack.py | {
"start": 26216,
"end": 39388
} | class ____(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0, rng=None):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError(f"mode={mode} not implemented")
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of"
f" {' '.join(_NEUPD_WHICH)}")
if k >= n - 1:
raise ValueError(f"k must be less than ndim(A)-1, k={k}")
self.rng = np.random.default_rng(rng)
_ArpackParams.__init__(self, n, k, tp, rng, mode, sigma, ncv, v0, maxiter,
which, tol)
self.arpack_dict['bmat'] = 0 if self.bmat == 'I' else 1
if self.ncv > n or self.ncv <= k + 1:
raise ValueError(f"ncv must be k+1<ncv<=n, ncv={self.ncv}")
self.workd = np.zeros(3 * n, dtype=self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), dtype=self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpacklib.__dict__[ltr + 'naupd_wrap']
self._arpack_extract = _arpacklib.__dict__[ltr + 'neupd_wrap']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, dtype=np.int32)
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, dtype=self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self._arpack_solver(
self.arpack_dict, self.resid, self.v, self.ipntr, self.workd,
self.workl
)
else:
self._arpack_solver(
self.arpack_dict, self.resid, self.v, self.ipntr, self.workd,
self.workl, self.rwork
)
xslice = slice(self.ipntr[0], self.ipntr[0] + self.n)
yslice = slice(self.ipntr[1], self.ipntr[1] + self.n)
if self.arpack_dict['ido'] == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2], self.ipntr[2] + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.arpack_dict['ido'] == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.arpack_dict['ido'] == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
elif self.arpack_dict['ido'] == 4:
if self.tp in 'fd':
# Generate random vector into resid
self.resid[:] = self.rng.uniform(low=-1.0, high=1.0,
size=[self.n]).astype(self.tp)
else:
# Generate complex random vector into resid
self.resid[:] = self.rng.uniform(low=-1.0, high=1.0, size=[self.n, 2]
).view(np.complex128).astype(self.tp).ravel()
elif self.arpack_dict['ido'] == 5:
self.workd[yslice] = self.OP(self.workd[xslice])
else:
self.converged = True
if self.arpack_dict['info'] == 0:
pass
elif self.arpack_dict['info'] == 1:
self._raise_no_convergence()
else:
raise ArpackError(info=self.arpack_dict['info'],
infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
self.arpack_dict['info'] = 0 # Clear, if any, previous error from naupd
howmny = HOWMNY_DICT['A'] # return all eigenvectors
sselect = np.zeros(self.ncv, dtype=np.int32)
sigmar = float(np.real(self.sigma))
sigmai = float(np.imag(self.sigma))
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros([k + 1], dtype=self.tp)
di = np.zeros([k + 1], dtype=self.tp)
# Using a Fortran ordered array for NumPy parse the result correctly
zr = np.zeros([n, k + 1], dtype=self.tp, order='F')
# ARPACK _neupd call
self._arpack_extract(
self.arpack_dict, return_eigenvectors, howmny, sselect, dr, di,
zr, sigmar, sigmai, workev, self.resid, self.v, self.ipntr,
self.workd, self.workl
)
ierr = self.arpack_dict['info']
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.arpack_dict['nconv'] # number of good eigs returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
if self.mode in (1, 2):
rd = d
elif self.mode in (3, 4):
rd = 1 / (d - self.sigma)
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) (complex pairs come together)
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
ind = ind[-k:][::-1]
elif self.which in ['SR', 'SM', 'SI']:
ind = ind[:k]
d = d[ind]
z = z[:, ind]
else:
d = np.zeros([k], dtype=self.tp)
z = np.zeros([n, k], dtype=self.tp, order='F')
self._arpack_extract(
self.arpack_dict, return_eigenvectors, howmny, sselect, d, z,
self.sigma, workev, self.resid, self.v, self.ipntr, self.workd,
self.workl, self.rwork)
ierr = self.arpack_dict['info']
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.arpack_dict['nconv']
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
| _UnsymmetricArpackParams |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 57510,
"end": 57690
} | class ____:
xlDisabled = 0 # from enum XlEnableCancelKey
xlErrorHandler = 2 # from enum XlEnableCancelKey
xlInterrupt = 1 # from enum XlEnableCancelKey
| EnableCancelKey |
python | kamyu104__LeetCode-Solutions | Python/maximize-win-from-two-segments.py | {
"start": 64,
"end": 583
} | class ____(object):
def maximizeWin(self, prizePositions, k):
"""
:type prizePositions: List[int]
:type k: int
:rtype: int
"""
dp = [0]*(len(prizePositions)+1)
result = left = 0
for right in xrange(len(prizePositions)):
while prizePositions[right]-prizePositions[left] > k:
left += 1
dp[right+1] = max(dp[right], right-left+1)
result = max(result, dp[left]+(right-left+1))
return result
| Solution |
python | celery__celery | celery/utils/collections.py | {
"start": 22623,
"end": 25432
} | class ____(OrderedDict, Evictable):
"""Map of buffers."""
Buffer = Messagebuffer
Empty = Empty
maxsize = None
total = 0
bufmaxsize = None
def __init__(self, maxsize, iterable=None, bufmaxsize=1000):
# type: (int, Iterable, int) -> None
super().__init__()
self.maxsize = maxsize
self.bufmaxsize = 1000
if iterable:
self.update(iterable)
self.total = sum(len(buf) for buf in self.items())
def put(self, key, item):
# type: (Any, Any) -> None
self._get_or_create_buffer(key).put(item)
self.total += 1
self.move_to_end(key) # least recently used.
self.maxsize and self._evict()
def extend(self, key, it):
# type: (Any, Iterable) -> None
self._get_or_create_buffer(key).extend(it)
self.total += len(it)
self.maxsize and self._evict()
def take(self, key, *default):
# type: (Any, *Any) -> Any
item, throw = None, False
try:
buf = self[key]
except KeyError:
throw = True
else:
try:
item = buf.take()
self.total -= 1
except self.Empty:
throw = True
else:
self.move_to_end(key) # mark as LRU
if throw:
if default:
return default[0]
raise self.Empty()
return item
def _get_or_create_buffer(self, key):
# type: (Any) -> Messagebuffer
try:
return self[key]
except KeyError:
buf = self[key] = self._new_buffer()
return buf
def _new_buffer(self):
# type: () -> Messagebuffer
return self.Buffer(maxsize=self.bufmaxsize)
def _LRUpop(self, *default):
# type: (*Any) -> Any
return self[self._LRUkey()].take(*default)
def _pop_to_evict(self):
# type: () -> None
for _ in range(100):
key = self._LRUkey()
buf = self[key]
try:
buf.take()
except (IndexError, self.Empty):
# buffer empty, remove it from mapping.
self.pop(key)
else:
# we removed one item
self.total -= 1
# if buffer is empty now, remove it from mapping.
if not len(buf):
self.pop(key)
else:
# move to least recently used.
self.move_to_end(key)
break
def __repr__(self):
# type: () -> str
return f'<{type(self).__name__}: {self.total}/{self.maxsize}>'
@property
def _evictcount(self):
# type: () -> int
return self.total
| BufferMap |
python | huggingface__transformers | src/transformers/models/edgetam/modeling_edgetam.py | {
"start": 11619,
"end": 12670
} | class ____(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
activation: str = "relu",
sigmoid_output: bool = False,
):
super().__init__()
self.num_layers = num_layers
self.activation = ACT2FN[activation]
self.proj_in = nn.Linear(input_dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, output_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
self.sigmoid_output = sigmoid_output
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
hidden_states = self.activation(hidden_states)
for layer in self.layers:
hidden_states = self.activation(layer(hidden_states))
hidden_states = self.proj_out(hidden_states)
if self.sigmoid_output:
hidden_states = F.sigmoid(hidden_states)
return hidden_states
@auto_docstring
| EdgeTamFeedForward |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 3803,
"end": 4033
} | class ____(graphene.ObjectType):
failedRunId = graphene.NonNull(graphene.String)
class Meta:
name = "AssetHealthMaterializationDegradedNotPartitionedMeta"
| GrapheneAssetHealthMaterializationDegradedNotPartitionedMeta |
python | openai__openai-python | src/openai/_module_client.py | {
"start": 1389,
"end": 1508
} | class ____(LazyProxy["Beta"]):
@override
def __load__(self) -> Beta:
return _load_client().beta
| BetaProxy |
python | scipy__scipy | scipy/stats/tests/test_morestats.py | {
"start": 135515,
"end": 140338
} | class ____:
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
# This would result a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2], [2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
@pytest.mark.parametrize("correction", [False, True])
def test_result(self, correction):
x = [1, 2, 3]
y = [1, 2, 3]
res = stats.median_test(x, y, correction=correction)
assert_equal((res.statistic, res.pvalue, res.median, res.table), res)
@make_xp_test_case(stats.directional_stats)
| TestMedianTest |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/trackable_view_test.py | {
"start": 869,
"end": 1585
} | class ____(test.TestCase):
def test_children(self):
root = base.Trackable()
leaf = base.Trackable()
root._track_trackable(leaf, name="leaf")
(current_name,
current_dependency), = trackable_view.TrackableView.children(root).items()
self.assertIs(leaf, current_dependency)
self.assertEqual("leaf", current_name)
def test_descendants(self):
root = base.Trackable()
leaf = base.Trackable()
root._track_trackable(leaf, name="leaf")
descendants = trackable_view.TrackableView(root).descendants()
self.assertIs(2, len(descendants))
self.assertIs(root, descendants[0])
self.assertIs(leaf, descendants[1])
if __name__ == "__main__":
test.main()
| TrackableViewTest |
python | astropy__astropy | astropy/table/tests/test_masked.py | {
"start": 12151,
"end": 14656
} | class ____:
def test_add_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(MaskedColumn(name="a", data=[1, 2, 3], mask=[0, 1, 0]))
assert t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert isinstance(t["a"], MaskedColumn)
assert isinstance(t["b"], MaskedColumn)
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 1, 0], bool))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_masked_column_to_non_masked_table(self):
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name="a", data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
assert not t.masked # Changed in 4.0, table no longer auto-upgrades
assert isinstance(t["a"], Column) # Was MaskedColumn before 4.0
assert isinstance(t["b"], MaskedColumn)
assert np.all(t["a"] == np.array([1, 2, 3]))
assert not hasattr(t["a"], "mask")
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_add_non_masked_column_to_masked_table(self):
t = Table(masked=True)
assert t.masked
t.add_column(Column(name="a", data=[1, 2, 3]))
assert t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[1, 0, 1]))
assert t.masked
assert isinstance(t["a"], MaskedColumn)
assert isinstance(t["b"], MaskedColumn)
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["a"].mask == np.array([0, 0, 0], bool))
assert np.all(t["b"] == np.array([4, 5, 6]))
assert np.all(t["b"].mask == np.array([1, 0, 1], bool))
def test_convert_to_masked_table_only_if_necessary(self):
# Do not convert to masked table, if new column has no masked value.
# See #1185 for details.
t = Table(masked=False)
assert not t.masked
t.add_column(Column(name="a", data=[1, 2, 3]))
assert not t.masked
t.add_column(MaskedColumn(name="b", data=[4, 5, 6], mask=[0, 0, 0]))
assert not t.masked
assert np.all(t["a"] == np.array([1, 2, 3]))
assert np.all(t["b"] == np.array([4, 5, 6]))
| TestAddColumn |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_tensor.py | {
"start": 95635,
"end": 113104
} | class ____(
type_spec.BatchableTypeSpec, internal_types.RaggedTensorSpec):
"""Type specification for a `tf.RaggedTensor`."""
__slots__ = [
"_shape", "_dtype", "_ragged_rank", "_row_splits_dtype",
"_flat_values_spec"
]
@property
def dtype(self):
"""The `tf.dtypes.DType` specified by this type for the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([["a"], ["b", "c"]], dtype=tf.string)
>>> tf.type_spec_from_value(rt).dtype
tf.string
Returns:
A `tf.dtypes.DType` of the values in the RaggedTensor.
"""
return self._dtype
@property
def shape(self):
"""The statically known shape of the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None])
>>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None, 2])
Returns:
A `tf.TensorShape` containing the statically known shape of the
RaggedTensor. Ragged dimensions have a size of `None`.
"""
return self._shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Defaults to `shape.ndims - 1`.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> tf.type_spec_from_value(values).ragged_rank
1
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> tf.type_spec_from_value(rt1).ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
return self._ragged_rank
@property
def row_splits_dtype(self):
"""The `tf.dtypes.DType` of the RaggedTensor's `row_splits`.
Examples:
>>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)
>>> tf.type_spec_from_value(rt).row_splits_dtype
tf.int64
Returns:
A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
"""
return self._row_splits_dtype
@property
def flat_values_spec(self):
"""The `TypeSpec` of the flat_values of RaggedTensor.
Returns:
- The TypeSpec of flat_values.
- None when the flat_values is a Tensor.
"""
return self._flat_values_spec
@property
def value_type(self):
return RaggedTensor if self._ragged_rank > 0 else tensor_lib.Tensor
def __init__(self,
shape=None,
dtype=dtypes.float32,
ragged_rank=None,
row_splits_dtype=dtypes.int64,
flat_values_spec=None):
"""Constructs a type specification for a `tf.RaggedTensor`.
Args:
shape: The shape of the RaggedTensor, or `None` to allow any shape. If a
shape is specified, then all ragged dimensions must have size `None`.
dtype: `tf.DType` of values in the RaggedTensor.
ragged_rank: Python integer, the number of times the RaggedTensor's
flat_values is partitioned. Defaults to `shape.ndims - 1`.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
flat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be
provided when the flat_values is a CompositeTensor rather then Tensor.
If both `dtype` and `flat_values_spec` and are provided, `dtype` must
be the same as `flat_values_spec.dtype`. (experimental)
"""
self._shape = tensor_shape.as_shape(shape)
self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if flat_values_spec is not None:
if dtype is None:
dtype = flat_values_spec.dtype
elif dtype != flat_values_spec.dtype:
raise ValueError("dtype must be the same as flat_values_spec.dtype")
elif dtype is None:
raise ValueError(
"At least one of dtype or flat_values_spec must be provided")
self._dtype = dtypes.as_dtype(dtype)
self._flat_values_spec = flat_values_spec
rank = self._shape.ndims
if ragged_rank is None:
if rank is None:
raise ValueError("Must specify ragged_rank or "
"a shape with a known rank.")
ragged_rank = rank - 1
self._ragged_rank = ragged_rank
if not isinstance(self._ragged_rank, int):
raise TypeError(f"Argument `ragged_rank` must be an int. "
f"Received {ragged_rank}.")
if rank is not None:
if ragged_rank >= rank:
raise ValueError(f"Argument `ragged_rank` ({ragged_rank}) must be less "
f"than rank ({rank}).")
def is_compatible_with(self, spec_or_value):
# RaggedTensor with ragged_rank 0 can be compatible with raw flat_values.
if self._ragged_rank == 0:
if self._flat_values_spec is None:
if isinstance(
spec_or_value, (tensor_lib.Tensor, tensor_lib.TensorSpec)):
return tensor_lib.TensorSpec(
self._shape, self._dtype).is_compatible_with(spec_or_value)
elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)):
return self._flat_values_spec.is_compatible_with(spec_or_value)
return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value)
def _serialize(self):
if self._flat_values_spec is None:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype)
else:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype, self._flat_values_spec)
@property
def _component_specs(self):
if self._ragged_rank <= 0:
if self._flat_values_spec is not None:
return [self._flat_values_spec]
else:
return [tensor_lib.TensorSpec(self._shape, self._dtype)]
flat_values_spec = self._flat_values_spec
if flat_values_spec is None:
flat_values_shape = tensor_shape.TensorShape([None]).concatenate(
self._shape[self._ragged_rank + 1:])
flat_values_spec = tensor_lib.TensorSpec(flat_values_shape, self._dtype)
outer_dim = tensor_shape.dimension_at_index(self._shape, 0)
outer_splits_shape = [None if outer_dim is None else outer_dim + 1]
inner_splits_spec = tensor_lib.TensorSpec([None], self._row_splits_dtype)
specs = ([
flat_values_spec,
tensor_lib.TensorSpec(outer_splits_shape, self._row_splits_dtype)
] + [inner_splits_spec for _ in range(self._ragged_rank - 1)])
return specs
def _to_components(self, value):
if is_ragged(value):
return [value.flat_values] + list(value.nested_row_splits)
else:
return [value]
def _from_components(self, tensor_list):
result = tensor_list[0]
if (all(isinstance(t, np.ndarray) for t in tensor_list) and
not tf2.enabled()):
for row_splits in reversed(tensor_list[1:]):
result = ragged_tensor_value.RaggedTensorValue(result, row_splits)
else:
if isinstance(tensor_list[0], np.ndarray):
tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]
result = tensor_list[0]
for row_splits in reversed(tensor_list[1:]):
result = RaggedTensor(
result,
RowPartition.from_row_splits(row_splits, validate=False),
internal=True)
if self._shape.ndims is not None:
if isinstance(result, RaggedTensor):
result._set_shape(self._shape) # pylint: disable=protected-access
# TODO(xjun): MaskedTensor doesn't implement set_shape.
if self.flat_values_spec is not None and hasattr(result.flat_values,
"set_shape"):
result.flat_values.set_shape(self.flat_values_spec.shape)
elif isinstance(result, tensor_lib.Tensor):
result.set_shape(self._shape)
return result
# The RaggedTensorSpec tensor_list encoding uses to/from_variant ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is
# `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of
# boxed `RaggedTensor` objects with shape `(...)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_lib.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
# TODO(edloper): Update gen_ragged_conversion_ops that convert to and
# from variant to include all of the row-partitioning tensors.
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
if isinstance(value, RaggedTensor):
if value.ragged_rank != self._ragged_rank:
raise ValueError(
f"Ragged rank of value {value.ragged_rank} does not match "
f"ragged rank of type {self._ragged_rank}.")
# pylint: disable=protected-access
return [value._to_variant(batched_input=False)]
else:
if self._ragged_rank > 0:
raise ValueError(
f"Expected a RaggedTensor if ragged rank={self._ragged_rank}"
f" but got {type(value).__name__}."
)
return [
gen_ragged_conversion_ops.ragged_tensor_to_variant(
(), value, batched_input=False)
]
def _to_batched_tensor_list(self, value):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
if isinstance(value, RaggedTensor):
if value.ragged_rank != self._ragged_rank:
raise ValueError(
f"Ragged rank of value {value.ragged_rank} does not match "
f"ragged rank of type {self._ragged_rank}.")
# pylint: disable=protected-access
return [value._to_variant(batched_input=True)]
else:
if self._ragged_rank > 0:
raise ValueError(
f"Expected a RaggedTensor if ragged rank={self._ragged_rank}"
f" but got {type(value).__name__}."
)
return [
gen_ragged_conversion_ops.ragged_tensor_to_variant(
rt_nested_splits=(), rt_dense_values=value, batched_input=True)
]
def _from_compatible_tensor_list(self, tensor_list):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
result = RaggedTensor._from_variant( # pylint: disable=protected-access
tensor_list[0],
dtype=self._dtype,
row_splits_dtype=self._row_splits_dtype,
output_ragged_rank=self._ragged_rank)
if self._shape.ndims is not None:
if isinstance(result, RaggedTensor):
result._set_shape(self._shape) # pylint: disable=protected-access
# TODO(xjun): MaskedTensor doesn't implement set_shape.
if self.flat_values_spec is not None and hasattr(self.flat_values,
"set_shape"):
result.flat_values.set_shape(self.flat_values_spec.shape)
else:
result.set_shape(self._shape)
return result
def _batch(self, batch_size):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
return RaggedTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype, self._ragged_rank + 1, self._row_splits_dtype)
def _unbatch(self):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
# Note: Negative ragged_rank is allowed here because the dataset could be
# subsequently batched again. If ragged_rank > 1, assume row_splits_dtype is
# consistent. Errors are handled in
# RaggedTensorSpec._from_compatible_tensor_list()
return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1,
self._row_splits_dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return self
@classmethod
def from_value(cls, value):
if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or
isinstance(value.flat_values, tensor_lib.Tensor)):
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype)
else:
flat_values_spec = type_spec.type_spec_from_value(value.flat_values)
# Relax shape[0] to None, as it is connected to dynamic ragged shapes.
flat_values_spec = flat_values_spec._unbatch()._batch(None) # pylint: disable=protected-access
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype,
flat_values_spec=flat_values_spec)
nested_structure_coder.register_codec(
nested_structure_coder.BuiltInTypeSpecCodec(
RaggedTensorSpec, struct_pb2.TypeSpecProto.RAGGED_TENSOR_SPEC
)
)
type_spec.register_type_spec_from_value_converter(
ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)
# ===============================================================================
# Convert value -> tensor
# ===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
dtype=None,
preferred_dtype=None,
name=None):
"""Converts value to a `RaggedTensor` or `Tensor`.
* If `value` is a `RaggedTensor`, then return it as-is.
* If `value` is a `RaggedTensorValue`, return a corresponding constant
`RaggedTensor`.
* Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.
Args:
value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing the type
is inferred from the type of `value`.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. This argument has no effect if `value` is already a
tensor, or when conversion is not possible.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `RaggedTensor`.
"""
if isinstance(value, RaggedTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(f"Tensor conversion requested dtype {dtype.name} for "
f"RaggedTensor with dtype {value.dtype.name}: {value}.")
return value
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
flat_values = ops.convert_to_tensor(
value=value.flat_values,
dtype=dtype,
dtype_hint=preferred_dtype,
name="flat_values")
return RaggedTensor.from_nested_row_splits(
flat_values, value.nested_row_splits, validate=False)
else:
return tensor_conversion.convert_to_tensor_v2_with_dispatch(
value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name
)
def _convert_to_ragged_tensor_values(value):
"""Converts value to supported RaggedTensor value.
* If `value` is an object of supported value type, then return it as-is.
* Otherwise convert it to Tensor or RaggedTensor.
Args:
value: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
value types, or an object whose type has a registered `Tensor` conversion
function.
Returns:
An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
value types
"""
if _is_supported_ragged_values_type(value):
return value
else:
return convert_to_tensor_or_ragged_tensor(value, name="values")
# ===============================================================================
# Register RaggedTensor for use with session.run.
# ===============================================================================
def _ragged_tensor_value_from_components(components):
components = list(components)
value = components.pop()
while components:
value = ragged_tensor_value.RaggedTensorValue(value, components.pop())
return value
def _ragged_tensor_session_fetch(rt):
components = rt.nested_row_splits + (rt.flat_values,)
return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
session.register_session_run_conversion_functions(
RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
_ragged_tensor_session_feed_for_partial_run)
# ===============================================================================
# RaggedTensorType
# ===============================================================================
| RaggedTensorSpec |
python | scipy__scipy | scipy/linalg/tests/test_blas.py | {
"start": 31396,
"end": 32895
} | class ____:
"""Quick and simple tests for (zc)-symm, syrk, syr2k."""
def setup_method(self):
self.sigma_y = np.array([[0., -1.j],
[1.j, 0.]])
@parametrize_blas(fblas, "symm", "zc")
def test_symm(self, f, dtype):
# NB: a is symmetric w/upper diag of ONLY
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, -1]))
@parametrize_blas(fblas, "hemm", "zc")
def test_hemm(self, f, dtype):
# NB: a is hermitian w/upper diag of ONLY
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
@parametrize_blas(fblas, "syrk", "zc")
def test_syrk(self, f, dtype):
res = f(a=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([-1, -1]))
@parametrize_blas(fblas, "herk", "zc")
def test_herk(self, f, dtype):
res = f(a=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
@parametrize_blas(fblas, "syr2k", "zc")
def test_syr2k_zr(self, f, dtype):
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1]))
@parametrize_blas(fblas, "her2k", "zc")
def test_her2k_zr(self, f, dtype):
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1]))
| TestSyHe |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 38356,
"end": 44431
} | class ____(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas, coordinates=True, *, style=wx.TB_BOTTOM):
wx.ToolBar.__init__(self, canvas.GetParent(), -1, style=style)
if wx.Platform == '__WXMAC__':
self.SetToolBitmapSize(self.GetToolBitmapSize()*self.GetDPIScaleFactor())
self.wx_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.AddSeparator()
continue
self.wx_ids[text] = (
self.AddTool(
-1,
bitmap=self._icon(f"{image_file}.svg"),
bmpDisabled=wx.NullBitmap,
label=text, shortHelp=tooltip_text,
kind=(wx.ITEM_CHECK if text in ["Pan", "Zoom"]
else wx.ITEM_NORMAL))
.Id)
self.Bind(wx.EVT_TOOL, getattr(self, callback),
id=self.wx_ids[text])
self._coordinates = coordinates
if self._coordinates:
self.AddStretchableSpace()
self._label_text = wx.StaticText(self, style=wx.ALIGN_RIGHT)
self.AddControl(self._label_text)
self.Realize()
NavigationToolbar2.__init__(self, canvas)
@staticmethod
def _icon(name):
"""
Construct a `wx.Bitmap` suitable for use as icon from an image file
*name*, including the extension and relative to Matplotlib's "images"
data directory.
"""
try:
dark = wx.SystemSettings.GetAppearance().IsDark()
except AttributeError: # wxpython < 4.1
# copied from wx's IsUsingDarkBackground / GetLuminance.
bg = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
fg = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
# See wx.Colour.GetLuminance.
bg_lum = (.299 * bg.red + .587 * bg.green + .114 * bg.blue) / 255
fg_lum = (.299 * fg.red + .587 * fg.green + .114 * fg.blue) / 255
dark = fg_lum - bg_lum > .2
path = cbook._get_data_path('images', name)
if path.suffix == '.svg':
svg = path.read_bytes()
if dark:
svg = svg.replace(b'fill:black;', b'fill:white;')
toolbarIconSize = wx.ArtProvider().GetDIPSizeHint(wx.ART_TOOLBAR)
return wx.BitmapBundle.FromSVG(svg, toolbarIconSize)
else:
pilimg = PIL.Image.open(path)
# ensure RGBA as wx BitMap expects RGBA format
image = np.array(pilimg.convert("RGBA"))
if dark:
fg = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
black_mask = (image[..., :3] == 0).all(axis=-1)
image[black_mask, :3] = (fg.Red(), fg.Green(), fg.Blue())
return wx.Bitmap.FromBufferRGBA(
image.shape[1], image.shape[0], image.tobytes())
def _update_buttons_checked(self):
if "Pan" in self.wx_ids:
self.ToggleTool(self.wx_ids["Pan"], self.mode.name == "PAN")
if "Zoom" in self.wx_ids:
self.ToggleTool(self.wx_ids["Zoom"], self.mode.name == "ZOOM")
    def zoom(self, *args):
        # Toggle zoom-to-rect mode via the base class, then sync the
        # checkable Pan/Zoom toolbar buttons with the new mode.
        super().zoom(*args)
        self._update_buttons_checked()
    def pan(self, *args):
        # Toggle pan/zoom mode via the base class, then sync the
        # checkable Pan/Zoom toolbar buttons with the new mode.
        super().pan(*args)
        self._update_buttons_checked()
    def save_figure(self, *args):
        """Prompt for a filename with a native dialog and save the figure.

        Returns the saved path on success; returns ``None`` if the dialog
        was cancelled or saving raised (the error is shown in a message box).
        """
        # Fetch the required filename and file type.
        filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
        default_file = self.canvas.get_default_filename()
        dialog = wx.FileDialog(
            self.canvas.GetParent(), "Save to file",
            mpl.rcParams["savefig.directory"], default_file, filetypes,
            wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        dialog.SetFilterIndex(filter_index)
        if dialog.ShowModal() == wx.ID_OK:
            path = pathlib.Path(dialog.GetPath())
            _log.debug('%s - Save file path: %s', type(self), path)
            # Format chosen in the dialog's type dropdown...
            fmt = exts[dialog.GetFilterIndex()]
            # ...vs. the extension the user actually typed.
            ext = path.suffix[1:]
            if ext in self.canvas.get_supported_filetypes() and fmt != ext:
                # looks like they forgot to set the image type drop
                # down, going with the extension.
                _log.warning('extension %s did not match the selected '
                             'image type %s; going with %s',
                             ext, fmt, ext)
                fmt = ext
            # Save dir for next time, unless empty str (which means use cwd).
            if mpl.rcParams["savefig.directory"]:
                mpl.rcParams["savefig.directory"] = str(path.parent)
            try:
                self.canvas.figure.savefig(path, format=fmt)
                return path
            except Exception as e:
                # Surface the failure to the user instead of only logging.
                dialog = wx.MessageDialog(
                    parent=self.canvas.GetParent(), message=str(e),
                    caption='Matplotlib error')
                dialog.ShowModal()
                dialog.Destroy()
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
sf = 1 if wx.Platform == '__WXMSW__' else self.canvas.GetDPIScaleFactor()
self.canvas._rubberband_rect = (x0/sf, (height - y0)/sf,
x1/sf, (height - y1)/sf)
self.canvas.Refresh()
    def remove_rubberband(self):
        # Clear the rubberband overlay and request a repaint.
        self.canvas._rubberband_rect = None
        self.canvas.Refresh()
def set_message(self, s):
if self._coordinates:
self._label_text.SetLabel(s)
def set_history_buttons(self):
can_backward = self._nav_stack._pos > 0
can_forward = self._nav_stack._pos < len(self._nav_stack) - 1
if 'Back' in self.wx_ids:
self.EnableTool(self.wx_ids['Back'], can_backward)
if 'Forward' in self.wx_ids:
self.EnableTool(self.wx_ids['Forward'], can_forward)
# tools for matplotlib.backend_managers.ToolManager:
| NavigationToolbar2Wx |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 133823,
"end": 136957
class ____(sgqlc.types.Input):
    """Autogenerated input type of AddPullRequestReviewComment"""

    # NOTE: autogenerated sgqlc binding; every field below is documented
    # upstream as deprecated (removal announced for 2023-10-01) in favor of
    # addPullRequestReviewThread / addPullRequestReviewThreadReply.
    __schema__ = github_schema
    __field_names__ = (
        "pull_request_id",
        "pull_request_review_id",
        "commit_oid",
        "body",
        "path",
        "position",
        "in_reply_to",
        "client_mutation_id",
    )
    pull_request_id = sgqlc.types.Field(ID, graphql_name="pullRequestId")
    """The node ID of the pull request reviewing  **Upcoming Change on
    2023-10-01 UTC** **Description:** `pullRequestId` will be removed.
    use addPullRequestReviewThread or addPullRequestReviewThreadReply
    instead **Reason:** We are deprecating the
    addPullRequestReviewComment mutation
    """

    pull_request_review_id = sgqlc.types.Field(ID, graphql_name="pullRequestReviewId")
    """The Node ID of the review to modify.  **Upcoming Change on
    2023-10-01 UTC** **Description:** `pullRequestReviewId` will be
    removed. use addPullRequestReviewThread or
    addPullRequestReviewThreadReply instead **Reason:** We are
    deprecating the addPullRequestReviewComment mutation
    """

    commit_oid = sgqlc.types.Field(GitObjectID, graphql_name="commitOID")
    """The SHA of the commit to comment on.  **Upcoming Change on
    2023-10-01 UTC** **Description:** `commitOID` will be removed. use
    addPullRequestReviewThread or addPullRequestReviewThreadReply
    instead **Reason:** We are deprecating the
    addPullRequestReviewComment mutation
    """

    body = sgqlc.types.Field(String, graphql_name="body")
    """The text of the comment. This field is required  **Upcoming Change
    on 2023-10-01 UTC** **Description:** `body` will be removed. use
    addPullRequestReviewThread or addPullRequestReviewThreadReply
    instead **Reason:** We are deprecating the
    addPullRequestReviewComment mutation
    """

    path = sgqlc.types.Field(String, graphql_name="path")
    """The relative path of the file to comment on.  **Upcoming Change on
    2023-10-01 UTC** **Description:** `path` will be removed. use
    addPullRequestReviewThread or addPullRequestReviewThreadReply
    instead **Reason:** We are deprecating the
    addPullRequestReviewComment mutation
    """

    position = sgqlc.types.Field(Int, graphql_name="position")
    """The line index in the diff to comment on.  **Upcoming Change on
    2023-10-01 UTC** **Description:** `position` will be removed. use
    addPullRequestReviewThread or addPullRequestReviewThreadReply
    instead **Reason:** We are deprecating the
    addPullRequestReviewComment mutation
    """

    in_reply_to = sgqlc.types.Field(ID, graphql_name="inReplyTo")
    """The comment id to reply to.  **Upcoming Change on 2023-10-01 UTC**
    **Description:** `inReplyTo` will be removed. use
    addPullRequestReviewThread or addPullRequestReviewThreadReply
    instead **Reason:** We are deprecating the
    addPullRequestReviewComment mutation
    """

    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""
| AddPullRequestReviewCommentInput |
python | apache__airflow | providers/standard/tests/unit/standard/triggers/test_hitl.py | {
"start": 1934,
"end": 8641
class ____:
    """Unit tests for the HITL (human-in-the-loop) trigger: serialization
    round-trip and the async ``run`` loop's timeout / response handling."""

    def test_serialization(self, default_trigger_args):
        # serialize() must emit the importable classpath plus kwargs that can
        # reconstruct the trigger on the triggerer process.
        trigger = HITLTrigger(
            defaults=["1"],
            timeout_datetime=None,
            poke_interval=50.0,
            **default_trigger_args,
        )
        classpath, kwargs = trigger.serialize()
        expected_params_in_trigger_kwargs: dict[str, dict[str, Any]]
        # Airflow >= 3.2 adds a "source" key to serialized params.
        if AIRFLOW_V_3_2_PLUS:
            expected_params_in_trigger_kwargs = {
                "input": {"value": 1, "description": None, "schema": {}, "source": "task"}
            }
        else:
            expected_params_in_trigger_kwargs = {"input": {"value": 1, "description": None, "schema": {}}}
        assert classpath == "airflow.providers.standard.triggers.hitl.HITLTrigger"
        assert kwargs == {
            "ti_id": TI_ID,
            "options": ["1", "2", "3", "4", "5"],
            "params": expected_params_in_trigger_kwargs,
            "defaults": ["1"],
            "multiple": False,
            "timeout_datetime": None,
            "poke_interval": 50.0,
        }

    @pytest.mark.db_test
    @pytest.mark.asyncio
    @mock.patch("airflow.sdk.execution_time.hitl.update_hitl_detail_response")
    async def test_run_failed_due_to_timeout(self, mock_update, mock_supervisor_comms, default_trigger_args):
        # No defaults configured -> a timeout must yield a failure event.
        trigger = HITLTrigger(
            timeout_datetime=utcnow() + timedelta(seconds=0.1),
            poke_interval=5,
            **default_trigger_args,
        )
        mock_supervisor_comms.send.return_value = HITLDetailResponse(
            response_received=False,
            responded_by_user=None,
            responded_at=None,
            chosen_options=None,
            params_input={},
        )
        gen = trigger.run()
        # Sleep past the 0.1s timeout before polling once.
        await asyncio.sleep(0.3)
        trigger_task = asyncio.create_task(gen.__anext__())
        event = await trigger_task
        assert event == TriggerEvent(
            HITLTriggerEventFailurePayload(
                error="The timeout has passed, and the response has not yet been received.",
                error_type="timeout",
            )
        )

    @pytest.mark.db_test
    @pytest.mark.asyncio
    @mock.patch.object(HITLTrigger, "log")
    @mock.patch("airflow.sdk.execution_time.hitl.update_hitl_detail_response")
    async def test_run_fallback_to_default_due_to_timeout(
        self, mock_update, mock_log, mock_supervisor_comms, default_trigger_args
    ):
        # Defaults configured -> a timeout must succeed with the defaults
        # and be marked ``timedout=True``.
        trigger = HITLTrigger(
            defaults=["1"],
            timeout_datetime=utcnow() + timedelta(seconds=0.1),
            poke_interval=5,
            **default_trigger_args,
        )
        mock_supervisor_comms.send.return_value = HITLDetailResponse(
            response_received=False,
            responded_by_user=None,
            responded_at=None,
            chosen_options=None,
            params_input={},
        )
        gen = trigger.run()
        # Sleep past the 0.1s timeout before polling once.
        await asyncio.sleep(0.3)
        trigger_task = asyncio.create_task(gen.__anext__())
        event = await trigger_task
        assert event == TriggerEvent(
            HITLTriggerEventSuccessPayload(
                chosen_options=["1"],
                params_input={"input": 1},
                responded_by_user=None,
                responded_at=mock.ANY,
                timedout=True,
            )
        )
        assert mock_log.info.call_args == mock.call(
            "[HITL] timeout reached before receiving response, fallback to default %s", ["1"]
        )

    @pytest.mark.db_test
    @pytest.mark.asyncio
    @mock.patch.object(HITLTrigger, "log")
    @mock.patch("airflow.sdk.execution_time.hitl.update_hitl_detail_response")
    async def test_run_should_check_response_in_timeout_handler(
        self, mock_update, mock_log, mock_supervisor_comms, default_trigger_args
    ):
        # A response that lands just before the deadline must win over the
        # timeout fallback (``timedout=False``, user's options preserved).
        # action time only slightly before timeout
        action_datetime = utcnow() + timedelta(seconds=0.1)
        timeout_datetime = utcnow() + timedelta(seconds=0.1)
        trigger = HITLTrigger(
            defaults=["1"],
            timeout_datetime=timeout_datetime,
            poke_interval=5,
            **default_trigger_args,
        )
        mock_supervisor_comms.send.return_value = HITLDetailResponse(
            response_received=True,
            responded_by_user=HITLUser(id="1", name="test"),
            responded_at=action_datetime,
            chosen_options=["2"],
            params_input={},
        )
        gen = trigger.run()
        await asyncio.sleep(0.3)
        trigger_task = asyncio.create_task(gen.__anext__())
        event = await trigger_task
        assert event == TriggerEvent(
            HITLTriggerEventSuccessPayload(
                chosen_options=["2"],
                params_input={},
                responded_at=mock.ANY,
                responded_by_user={"id": "1", "name": "test"},
                timedout=False,
            )
        )
        assert mock_log.info.call_args == mock.call(
            "[HITL] responded_by=%s (id=%s) options=%s at %s (timeout fallback skipped)",
            "test",
            "1",
            ["2"],
            action_datetime,
        )

    @pytest.mark.db_test
    @pytest.mark.asyncio
    @mock.patch.object(HITLTrigger, "log")
    @mock.patch("airflow.sdk.execution_time.hitl.update_hitl_detail_response")
    async def test_run(
        self, mock_update, mock_log, mock_supervisor_comms, time_machine, default_trigger_args
    ):
        # Happy path: response received with no timeout configured.
        # Freeze time so the logged timestamp is deterministic.
        time_machine.move_to(datetime(2025, 7, 29, 2, 0, 0))
        trigger = HITLTrigger(
            defaults=["1"],
            timeout_datetime=None,
            poke_interval=5,
            **default_trigger_args,
        )
        mock_supervisor_comms.send.return_value = HITLDetailResponse(
            response_received=True,
            responded_by_user=HITLUser(id="test", name="test"),
            responded_at=utcnow(),
            chosen_options=["3"],
            params_input={"input": 50},
        )
        gen = trigger.run()
        await asyncio.sleep(0.3)
        trigger_task = asyncio.create_task(gen.__anext__())
        event = await trigger_task
        assert event == TriggerEvent(
            HITLTriggerEventSuccessPayload(
                chosen_options=["3"],
                params_input={"input": 50},
                responded_at=mock.ANY,
                responded_by_user={"id": "test", "name": "test"},
                timedout=False,
            )
        )
        assert mock_log.info.call_args == mock.call(
            "[HITL] responded_by=%s (id=%s) options=%s at %s",
            "test",
            "test",
            ["3"],
            datetime(2025, 7, 29, 2, 0, 0, tzinfo=utc),
        )
| TestHITLTrigger |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/html.py | {
"start": 18658,
"end": 37603
class ____(BaseDocumentTransformer):
    """Split HTML content preserving semantic structure.

    Splits HTML content by headers into generalized chunks, preserving semantic
    structure. If chunks exceed the maximum chunk size, it uses
    RecursiveCharacterTextSplitter for further splitting.

    The splitter preserves full HTML elements and converts links to Markdown-like links.
    It can also preserve images, videos, and audio elements by converting them into
    Markdown format. Note that some chunks may exceed the maximum size to maintain
    semantic integrity.

    !!! version-added "Added in `langchain-text-splitters` 0.3.5"

    Example:
        ```python
        from langchain_text_splitters.html import HTMLSemanticPreservingSplitter

        def custom_iframe_extractor(iframe_tag):
        ```
            Custom handler function to extract the 'src' attribute from an <iframe> tag.
            Converts the iframe to a Markdown-like link: [iframe:<src>](src).

            Args:
                iframe_tag (bs4.element.Tag): The <iframe> tag to be processed.

            Returns:
                str: A formatted string representing the iframe in Markdown-like format.
        ```
            iframe_src = iframe_tag.get('src', '')
            return f"[iframe:{iframe_src}]({iframe_src})"

        text_splitter = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")],
            max_chunk_size=500,
            preserve_links=True,
            preserve_images=True,
            custom_handlers={"iframe": custom_iframe_extractor}
        )
        ```
    """  # noqa: D214

    def __init__(
        self,
        headers_to_split_on: list[tuple[str, str]],
        *,
        max_chunk_size: int = 1000,
        chunk_overlap: int = 0,
        separators: list[str] | None = None,
        elements_to_preserve: list[str] | None = None,
        preserve_links: bool = False,
        preserve_images: bool = False,
        preserve_videos: bool = False,
        preserve_audio: bool = False,
        custom_handlers: dict[str, Callable[[Tag], str]] | None = None,
        stopword_removal: bool = False,
        stopword_lang: str = "english",
        normalize_text: bool = False,
        external_metadata: dict[str, str] | None = None,
        allowlist_tags: list[str] | None = None,
        denylist_tags: list[str] | None = None,
        preserve_parent_metadata: bool = False,
        keep_separator: bool | Literal["start", "end"] = True,
    ) -> None:
        """Initialize splitter.

        Args:
            headers_to_split_on: HTML headers (e.g., `h1`, `h2`)
                that define content sections.
            max_chunk_size: Maximum size for each chunk, with allowance for
                exceeding this limit to preserve semantics.
            chunk_overlap: Number of characters to overlap between chunks to ensure
                contextual continuity.
            separators: Delimiters used by `RecursiveCharacterTextSplitter` for
                further splitting.
            elements_to_preserve: HTML tags (e.g., `table`, `ul`) to remain
                intact during splitting.
            preserve_links: Converts `a` tags to Markdown links (`[text](url)`).
            preserve_images: Converts `img` tags to Markdown images ``.
            preserve_videos: Converts `video` tags to Markdown
                video links ``.
            preserve_audio: Converts `audio` tags to Markdown
                audio links ``.
            custom_handlers: Optional custom handlers for
                specific HTML tags, allowing tailored extraction or processing.
            stopword_removal: Optionally remove stopwords from the text.
            stopword_lang: The language of stopwords to remove.
            normalize_text: Optionally normalize text
                (e.g., lowercasing, removing punctuation).
            external_metadata: Additional metadata to attach to
                the Document objects.
            allowlist_tags: Only these tags will be retained in
                the HTML.
            denylist_tags: These tags will be removed from the HTML.
            preserve_parent_metadata: Whether to pass through parent document
                metadata to split documents when calling
                `transform_documents/atransform_documents()`.
            keep_separator: Whether separators
                should be at the beginning of a chunk, at the end, or not at all.
        """
        if not _HAS_BS4:
            msg = (
                "Could not import BeautifulSoup. "
                "Please install it with 'pip install bs4'."
            )
            raise ImportError(msg)
        self._headers_to_split_on = sorted(headers_to_split_on)
        self._max_chunk_size = max_chunk_size
        self._elements_to_preserve = elements_to_preserve or []
        self._preserve_links = preserve_links
        self._preserve_images = preserve_images
        self._preserve_videos = preserve_videos
        self._preserve_audio = preserve_audio
        self._custom_handlers = custom_handlers or {}
        self._stopword_removal = stopword_removal
        self._stopword_lang = stopword_lang
        self._normalize_text = normalize_text
        self._external_metadata = external_metadata or {}
        self._allowlist_tags = allowlist_tags
        self._preserve_parent_metadata = preserve_parent_metadata
        self._keep_separator = keep_separator
        # Header tags must survive allowlisting/denylisting, otherwise
        # header-based splitting would silently stop working.
        if allowlist_tags:
            self._allowlist_tags = list(
                set(allowlist_tags + [header[0] for header in headers_to_split_on])
            )
        self._denylist_tags = denylist_tags
        if denylist_tags:
            self._denylist_tags = [
                tag
                for tag in denylist_tags
                if tag not in [header[0] for header in headers_to_split_on]
            ]
        if separators:
            self._recursive_splitter = RecursiveCharacterTextSplitter(
                separators=separators,
                keep_separator=keep_separator,
                chunk_size=max_chunk_size,
                chunk_overlap=chunk_overlap,
            )
        else:
            self._recursive_splitter = RecursiveCharacterTextSplitter(
                keep_separator=keep_separator,
                chunk_size=max_chunk_size,
                chunk_overlap=chunk_overlap,
            )
        if self._stopword_removal:
            if not _HAS_NLTK:
                msg = (
                    "Could not import nltk. Please install it with 'pip install nltk'."
                )
                raise ImportError(msg)
            # NOTE(review): nltk.download at construction time performs
            # network I/O; confirm this is acceptable for callers.
            nltk.download("stopwords")
            self._stopwords = set(nltk.corpus.stopwords.words(self._stopword_lang))

    def split_text(self, text: str) -> list[Document]:
        """Splits the provided HTML text into smaller chunks based on the configuration.

        Args:
            text: The HTML content to be split.

        Returns:
            A list of `Document` objects containing the split content.
        """
        soup = BeautifulSoup(text, "html.parser")
        self._process_media(soup)
        if self._preserve_links:
            self._process_links(soup)
        if self._allowlist_tags or self._denylist_tags:
            self._filter_tags(soup)
        return self._process_html(soup)

    @override
    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> list[Document]:
        """Transform sequence of documents by splitting them."""
        transformed = []
        for doc in documents:
            splits = self.split_text(doc.page_content)
            if self._preserve_parent_metadata:
                # Child metadata wins on key collisions (dict merge order).
                splits = [
                    Document(
                        page_content=split_doc.page_content,
                        metadata={**doc.metadata, **split_doc.metadata},
                    )
                    for split_doc in splits
                ]
            transformed.extend(splits)
        return transformed

    def _process_media(self, soup: BeautifulSoup) -> None:
        """Processes the media elements.

        Process elements in the HTML content by wrapping them in a <media-wrapper> tag
        and converting them to Markdown format.

        Args:
            soup: Parsed HTML content using BeautifulSoup.
        """
        # NOTE(review): the markdown f-strings below are empty, although each
        # branch extracts a src attribute first — the Markdown link text
        # appears to have been lost; confirm against upstream.
        if self._preserve_images:
            for img_tag in _find_all_tags(soup, name="img"):
                img_src = img_tag.get("src", "")
                markdown_img = f""
                wrapper = soup.new_tag("media-wrapper")
                wrapper.string = markdown_img
                img_tag.replace_with(wrapper)
        if self._preserve_videos:
            for video_tag in _find_all_tags(soup, name="video"):
                video_src = video_tag.get("src", "")
                markdown_video = f""
                wrapper = soup.new_tag("media-wrapper")
                wrapper.string = markdown_video
                video_tag.replace_with(wrapper)
        if self._preserve_audio:
            for audio_tag in _find_all_tags(soup, name="audio"):
                audio_src = audio_tag.get("src", "")
                markdown_audio = f""
                wrapper = soup.new_tag("media-wrapper")
                wrapper.string = markdown_audio
                audio_tag.replace_with(wrapper)

    def _process_links(self, soup: BeautifulSoup) -> None:
        """Processes the links in the HTML content.

        Args:
            soup: Parsed HTML content using BeautifulSoup.
        """
        for a_tag in _find_all_tags(soup, name="a"):
            a_href = a_tag.get("href", "")
            a_text = a_tag.get_text(strip=True)
            markdown_link = f"[{a_text}]({a_href})"
            # NOTE(review): ``wrapper`` is built but never attached — the tag
            # is replaced with a bare NavigableString below; consider
            # removing the dead wrapper (or using it, as _process_media does).
            wrapper = soup.new_tag("link-wrapper")
            wrapper.string = markdown_link
            a_tag.replace_with(NavigableString(markdown_link))

    def _filter_tags(self, soup: BeautifulSoup) -> None:
        """Filters the HTML content based on the allowlist and denylist tags.

        Args:
            soup: Parsed HTML content using BeautifulSoup.
        """
        if self._allowlist_tags:
            for tag in _find_all_tags(soup, name=True):
                if tag.name not in self._allowlist_tags:
                    tag.decompose()
        if self._denylist_tags:
            for tag in _find_all_tags(soup, name=self._denylist_tags):
                tag.decompose()

    def _normalize_and_clean_text(self, text: str) -> str:
        """Normalizes the text by removing extra spaces and newlines.

        Args:
            text: The text to be normalized.

        Returns:
            The normalized text.
        """
        if self._normalize_text:
            text = text.lower()
            # Strip punctuation, then collapse runs of whitespace.
            text = re.sub(r"[^\w\s]", "", text)
            text = re.sub(r"\s+", " ", text).strip()
        if self._stopword_removal:
            text = " ".join(
                [word for word in text.split() if word not in self._stopwords]
            )
        return text

    def _process_html(self, soup: BeautifulSoup) -> list[Document]:
        """Processes the HTML content using BeautifulSoup and splits it using headers.

        Args:
            soup: Parsed HTML content using BeautifulSoup.

        Returns:
            A list of `Document` objects containing the split content.
        """
        documents: list[Document] = []
        current_headers: dict[str, str] = {}
        current_content: list[str] = []
        preserved_elements: dict[str, str] = {}
        placeholder_count: int = 0

        def _get_element_text(element: PageElement) -> str:
            """Recursively extracts and processes the text of an element.

            Applies custom handlers where applicable, and ensures correct spacing.

            Args:
                element: The HTML element to process.

            Returns:
                The processed text of the element.
            """
            element = cast("Tag | NavigableString", element)
            # Custom handlers take priority over default text extraction.
            if element.name in self._custom_handlers:
                return self._custom_handlers[element.name](element)
            text = ""
            if element.name is not None:
                for child in element.children:
                    child_text = _get_element_text(child).strip()
                    if text and child_text:
                        text += " "
                    text += child_text
            elif element.string:
                text += element.string
            return self._normalize_and_clean_text(text)

        elements = _find_all_tags(soup, recursive=False)

        def _process_element(
            element: ResultSet[Tag],
            documents: list[Document],
            current_headers: dict[str, str],
            current_content: list[str],
            preserved_elements: dict[str, str],
            placeholder_count: int,
        ) -> tuple[list[Document], dict[str, str], list[str], dict[str, str], int]:
            for elem in element:
                # Structural containers are transparent: recurse into their
                # children, then pick up any direct text they hold.
                if elem.name.lower() in {"html", "body", "div", "main"}:
                    children = _find_all_tags(elem, recursive=False)
                    (
                        documents,
                        current_headers,
                        current_content,
                        preserved_elements,
                        placeholder_count,
                    ) = _process_element(
                        children,
                        documents,
                        current_headers,
                        current_content,
                        preserved_elements,
                        placeholder_count,
                    )
                    content = " ".join(_find_all_strings(elem, recursive=False))
                    if content:
                        content = self._normalize_and_clean_text(content)
                        current_content.append(content)
                    continue
                if elem.name in [h[0] for h in self._headers_to_split_on]:
                    # A new section header: flush everything accumulated for
                    # the previous section before starting the next one.
                    if current_content:
                        documents.extend(
                            self._create_documents(
                                current_headers,
                                " ".join(current_content),
                                preserved_elements,
                            )
                        )
                        current_content.clear()
                        preserved_elements.clear()
                    header_name = elem.get_text(strip=True)
                    current_headers = {
                        dict(self._headers_to_split_on)[elem.name]: header_name
                    }
                elif elem.name in self._elements_to_preserve:
                    # Swap the element for a placeholder token so the
                    # recursive splitter cannot cut through it.
                    placeholder = f"PRESERVED_{placeholder_count}"
                    preserved_elements[placeholder] = _get_element_text(elem)
                    current_content.append(placeholder)
                    placeholder_count += 1
                else:
                    content = _get_element_text(elem)
                    if content:
                        current_content.append(content)
            return (
                documents,
                current_headers,
                current_content,
                preserved_elements,
                placeholder_count,
            )

        # Process the elements
        (
            documents,
            current_headers,
            current_content,
            preserved_elements,
            placeholder_count,
        ) = _process_element(
            elements,
            documents,
            current_headers,
            current_content,
            preserved_elements,
            placeholder_count,
        )
        # Handle any remaining content
        if current_content:
            documents.extend(
                self._create_documents(
                    current_headers,
                    " ".join(current_content),
                    preserved_elements,
                )
            )
        return documents

    def _create_documents(
        self, headers: dict[str, str], content: str, preserved_elements: dict[str, str]
    ) -> list[Document]:
        """Creates Document objects from the provided headers, content, and elements.

        Args:
            headers: The headers to attach as metadata to the Document.
            content: The content of the Document.
            preserved_elements: Preserved elements to be reinserted into the content.

        Returns:
            A list of `Document` objects.
        """
        content = re.sub(r"\s+", " ", content).strip()
        metadata = {**headers, **self._external_metadata}
        # Size check happens on the placeholder form; after reinsertion the
        # chunk may exceed max_chunk_size (documented behavior).
        if len(content) <= self._max_chunk_size:
            page_content = self._reinsert_preserved_elements(
                content, preserved_elements
            )
            return [Document(page_content=page_content, metadata=metadata)]
        return self._further_split_chunk(content, metadata, preserved_elements)

    def _further_split_chunk(
        self, content: str, metadata: dict[Any, Any], preserved_elements: dict[str, str]
    ) -> list[Document]:
        """Further splits the content into smaller chunks.

        Args:
            content: The content to be split.
            metadata: Metadata to attach to each chunk.
            preserved_elements: Preserved elements to be reinserted into each chunk.

        Returns:
            A list of `Document` objects containing the split content.
        """
        splits = self._recursive_splitter.split_text(content)
        result = []
        for split in splits:
            split_with_preserved = self._reinsert_preserved_elements(
                split, preserved_elements
            )
            if split_with_preserved.strip():
                result.append(
                    Document(
                        page_content=split_with_preserved.strip(),
                        metadata=metadata,
                    )
                )
        return result

    def _reinsert_preserved_elements(
        self, content: str, preserved_elements: dict[str, str]
    ) -> str:
        """Reinserts preserved elements into the content into their original positions.

        Args:
            content: The content where placeholders need to be replaced.
            preserved_elements: Preserved elements to be reinserted.

        Returns:
            The content with placeholders replaced by preserved elements.
        """
        for placeholder, preserved_content in preserved_elements.items():
            content = content.replace(placeholder, preserved_content.strip())
        return content
# %%
| HTMLSemanticPreservingSplitter |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/api_tests/secret_tests/test_business_logic.py | {
"start": 441,
"end": 8948
class ____:
    """Test the secret formatting functions."""

    # All fixtures use fixed timestamps, and the text-formatting tests run
    # under ``fixed_timezone("UTC")``, so snapshots are stable across
    # machines and local timezones.

    def _create_sample_secret_list(self):
        """Create sample SecretList for testing."""
        secrets = [
            DgApiSecret(
                id="secret-1-uuid-12345",
                secretName="database_password",
                secretValue="super_secret_value",  # Would be hidden in actual output
                locationNames=["data_pipeline", "analytics"],
                fullDeploymentScope=True,
                allBranchDeploymentsScope=False,
                specificBranchDeploymentScope=None,
                localDeploymentScope=False,
                canViewSecretValue=True,
                canEditSecret=True,
                updatedBy=DgApiUpdatedBy(email="admin@company.com"),
                updateTimestamp=datetime(2022, 1, 1, 14, 20, 0),  # UTC to avoid timezone edge cases
            ),
            DgApiSecret(
                id="secret-2-uuid-67890",
                secretName="api_key",
                secretValue=None,  # No value - might not have permission to view
                locationNames=[],  # All locations
                fullDeploymentScope=False,
                allBranchDeploymentsScope=True,
                specificBranchDeploymentScope=None,
                localDeploymentScope=False,
                canViewSecretValue=False,
                canEditSecret=False,
                updatedBy=None,
                updateTimestamp=None,
            ),
            DgApiSecret(
                id="secret-3-uuid-abcdef",
                secretName="staging_token",
                secretValue="staging_value_123",
                locationNames=["staging_app"],
                fullDeploymentScope=False,
                allBranchDeploymentsScope=False,
                specificBranchDeploymentScope="staging",
                localDeploymentScope=True,
                canViewSecretValue=True,
                canEditSecret=True,
                updatedBy=DgApiUpdatedBy(email="dev@company.com"),
                updateTimestamp=datetime(2022, 1, 1, 14, 21, 0),  # UTC
            ),
        ]
        return DgApiSecretList(items=secrets, total=3)

    def _create_empty_secret_list(self):
        """Create empty SecretList for testing."""
        return DgApiSecretList(items=[], total=0)

    def _create_single_secret(self):
        """Create single Secret for testing."""
        return DgApiSecret(
            id="single-secret-uuid-xyz",
            secretName="development_secret",
            secretValue="dev_secret_value",
            locationNames=["dev_pipeline", "test_pipeline"],
            fullDeploymentScope=False,
            allBranchDeploymentsScope=False,
            specificBranchDeploymentScope=None,
            localDeploymentScope=True,
            canViewSecretValue=True,
            canEditSecret=True,
            updatedBy=DgApiUpdatedBy(email="developer@company.com"),
            updateTimestamp=datetime(2022, 1, 1, 14, 20, 0),
        )

    def test_format_secrets_text_output(self, snapshot):
        """Test formatting secrets as text."""
        from dagster_shared.utils.timing import fixed_timezone

        secret_list = self._create_sample_secret_list()
        with fixed_timezone("UTC"):
            result = format_secrets(secret_list, as_json=False)
        # Snapshot the entire text output
        snapshot.assert_match(result)

    def test_format_secrets_json_output(self, snapshot):
        """Test formatting secrets as JSON."""
        secret_list = self._create_sample_secret_list()
        result = format_secrets(secret_list, as_json=True)
        # For JSON, we want to snapshot the parsed structure to avoid formatting differences
        import json

        parsed = json.loads(result)
        snapshot.assert_match(parsed)

    def test_format_empty_secrets_text_output(self, snapshot):
        """Test formatting empty secret list as text."""
        secret_list = self._create_empty_secret_list()
        result = format_secrets(secret_list, as_json=False)
        snapshot.assert_match(result)

    def test_format_empty_secrets_json_output(self, snapshot):
        """Test formatting empty secret list as JSON."""
        secret_list = self._create_empty_secret_list()
        result = format_secrets(secret_list, as_json=True)
        import json

        parsed = json.loads(result)
        snapshot.assert_match(parsed)

    def test_format_single_secret_text_output(self, snapshot):
        """Test formatting single secret as text."""
        from dagster_shared.utils.timing import fixed_timezone

        secret = self._create_single_secret()
        with fixed_timezone("UTC"):
            result = format_secret(secret, as_json=False)
        snapshot.assert_match(result)

    def test_format_single_secret_text_output_with_value(self, snapshot):
        """Test formatting single secret as text with value shown."""
        from dagster_shared.utils.timing import fixed_timezone

        secret = self._create_single_secret()
        with fixed_timezone("UTC"):
            result = format_secret(secret, as_json=False, show_value=True)
        snapshot.assert_match(result)

    def test_format_single_secret_json_output(self, snapshot):
        """Test formatting single secret as JSON."""
        secret = self._create_single_secret()
        result = format_secret(secret, as_json=True)
        import json

        parsed = json.loads(result)
        snapshot.assert_match(parsed)

    def test_format_single_secret_json_output_with_value(self, snapshot):
        """Test formatting single secret as JSON with value shown."""
        secret = self._create_single_secret()
        result = format_secret(secret, as_json=True, show_value=True)
        import json

        parsed = json.loads(result)
        snapshot.assert_match(parsed)

    def test_format_secret_without_metadata(self, snapshot):
        """Test formatting secret with no metadata."""
        from dagster_shared.utils.timing import fixed_timezone

        # Exercises the None updatedBy / None updateTimestamp rendering path.
        secret = DgApiSecret(
            id="simple-secret-uuid",
            secretName="simple_secret",
            secretValue="simple_value",
            locationNames=[],
            fullDeploymentScope=True,
            allBranchDeploymentsScope=False,
            specificBranchDeploymentScope=None,
            localDeploymentScope=False,
            canViewSecretValue=True,
            canEditSecret=True,
            updatedBy=None,
            updateTimestamp=None,
        )
        with fixed_timezone("UTC"):
            result = format_secret(secret, as_json=False)
        snapshot.assert_match(result)

    def test_format_secret_with_all_scopes(self, snapshot):
        """Test formatting secret with all scope types enabled."""
        from dagster_shared.utils.timing import fixed_timezone

        secret = DgApiSecret(
            id="multi-scope-secret-uuid",
            secretName="multi_scope_secret",
            secretValue="multi_scope_value",
            locationNames=["location1", "location2"],
            fullDeploymentScope=True,
            allBranchDeploymentsScope=True,
            specificBranchDeploymentScope="feature-branch",
            localDeploymentScope=True,
            canViewSecretValue=True,
            canEditSecret=False,
            updatedBy=DgApiUpdatedBy(email="system@company.com"),
            updateTimestamp=datetime(2022, 1, 1, 14, 22, 0),
        )
        with fixed_timezone("UTC"):
            result = format_secret(secret, as_json=False)
        snapshot.assert_match(result)

    def test_format_secret_no_permissions(self, snapshot):
        """Test formatting secret with no permissions."""
        from dagster_shared.utils.timing import fixed_timezone

        secret = DgApiSecret(
            id="no-perm-secret-uuid",
            secretName="restricted_secret",
            secretValue=None,  # No access to value
            locationNames=["secure_location"],
            fullDeploymentScope=False,
            allBranchDeploymentsScope=False,
            specificBranchDeploymentScope=None,
            localDeploymentScope=False,
            canViewSecretValue=False,
            canEditSecret=False,
            updatedBy=DgApiUpdatedBy(email="admin@company.com"),
            updateTimestamp=datetime(2022, 1, 1, 14, 23, 0),
        )
        with fixed_timezone("UTC"):
            result = format_secret(secret, as_json=False)
        snapshot.assert_match(result)
| TestFormatSecrets |
python | pytorch__pytorch | torchgen/static_runtime/generator.py | {
"start": 19279,
"end": 22591
class ____:
    """Generate C++ registration snippets for PyTorch's static runtime.

    For each group of overloads of an op, emits one
    ``REGISTER_(NATIVE_)OPERATOR_FUNCTOR`` block containing a
    schema-matched branch per overload and a logging fallthrough.
    Whitespace inside the f-string templates is part of the emitted C++
    source — do not reformat it.
    """

    def out_variant(
        self, groups: Sequence[NativeFunctionsGroup], backend_index: BackendIndex
    ) -> str:
        # Returns "" for an empty group list so callers can join results.
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsGroup)
                generated_type_variant = self.out_variant_op_generator(g, backend_index)
                generated_type_variants.append(generated_type_variant)
        # All overloads in a group share one op name; the first is canonical.
        op_name = op_name_from_group(groups[0])
        body = "\n".join(generated_type_variants)
        generated = f"""
REGISTER_OPERATOR_FUNCTOR(
    aten::{op_name},
    aten_{op_name},
    [](Node* n) -> SROperator {{
      {body}
      LogAndDumpSchema(n);
      return nullptr;
    }})
"""
        return generated

    def view(
        self, groups: Sequence[NativeFunctionsViewGroup], backend_index: BackendIndex
    ) -> str:
        if not groups:
            return ""
        generated_type_variants = []
        for g in groups:
            with native_function_manager(g):
                assert is_supported(g)
                assert isinstance(g, NativeFunctionsViewGroup)
                generated_type_variant = self.view_op_generator(g, backend_index)
                generated_type_variants.append(generated_type_variant)
        op_name = config.func_name_base_str(groups[0])
        body = "\n".join(generated_type_variants)
        generated = f"""
REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::{op_name},
    aten_{op_name},
    [](Node* n) -> SROperator {{
      {body}
      LogAndDumpSchema(n);
      return nullptr;
    }});
"""
        return generated

    def out_variant_op_generator(
        self, g: NativeFunctionsGroup, backend_index: BackendIndex
    ) -> str:
        """Emit one schema-matched C++ branch for an out-variant overload.

        The generated lambda calls the functional variant when the
        preallocated output is still None, otherwise resizes the existing
        output tensor to zero and calls the out variant in place.
        """
        functional = g.functional
        schema = str(functional.func)
        populated_argument = generate_arg_extraction(g.functional.func)
        functional_variant_call = generate_non_out_variant_call(g, backend_index)
        # Exactly one out argument is assumed by the template below.
        assert len(g.out.func.arguments.out) == 1
        out_variable_name = str(g.out.func.arguments.out[0].name)
        out_variant_call = generate_out_variant_call(g, backend_index)
        generated = f"""
    if (n->matches(torch::schema("aten::{schema}"))) {{
      return [](ProcessedNode* p_node) {{
        {populated_argument}
        if (p_node->Output(0).isNone()) {{
          p_node->Output(0) = {functional_variant_call};
          return;
        }}
        auto& {out_variable_name} = p_node->Output(0).toTensor();
        fastResizeToZero({out_variable_name});
        {out_variant_call};
      }};
    }}"""
        return generated

    def view_op_generator(
        self, g: NativeFunctionsViewGroup, backend_index: BackendIndex
    ) -> str:
        """Emit one schema-matched C++ branch for a view op (no out variant)."""
        schema = str(g.view.func)
        populated_argument = generate_arg_extraction(g.view.func)
        functional_variant_call = generate_call_to_view_ops(g, backend_index)
        generated = f"""
    if (n->matches(torch::schema("aten::{schema}"))) {{
      return [](ProcessedNode* p_node) {{
        {populated_argument}
        p_node->Output(0) = {functional_variant_call};
      }};
    }}"""
        return generated
| GenOpDispatcher |
python | tensorflow__tensorflow | tensorflow/python/data/ops/options.py | {
"start": 3256,
"end": 5612
} | class ____(enum.IntEnum):
"""Represents the type of auto-sharding to use.
OFF: No sharding will be performed.
AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.
FILE: Shards by input files (i.e. each worker will get a set of files to
process). When this option is selected, make sure that there is at least as
many files as workers. If there are fewer input files than workers, a runtime
error will be raised.
DATA: Shards by elements produced by the dataset. Each worker will process the
whole dataset and discard the portion that is not for itself. Note that for
this mode to correctly partitions the dataset elements, the dataset needs to
produce elements in a deterministic order.
HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
placeholder to replace with `shard(num_workers, worker_index)`.
"""
# LINT.IfChange
OFF = -1
AUTO = 0
FILE = 1
DATA = 2
HINT = 3
# LINT.ThenChange(//tensorflow/python/data/experimental/ops/data_service_ops.py:tf_data_service_sharding_policy)
@classmethod
def _to_proto(cls, obj):
"""Convert enum to proto."""
if obj == cls.OFF:
return dataset_options_pb2.AutoShardPolicy.OFF
if obj == cls.FILE:
return dataset_options_pb2.AutoShardPolicy.FILE
if obj == cls.DATA:
return dataset_options_pb2.AutoShardPolicy.DATA
if obj == cls.AUTO:
return dataset_options_pb2.AutoShardPolicy.AUTO
if obj == cls.HINT:
return dataset_options_pb2.AutoShardPolicy.HINT
raise ValueError(
f"Invalid `obj.` Supported values include `OFF`, `FILE`, `DATA`,"
f"`AUTO`, and `HINT`. Got {obj.name}."
)
@classmethod
def _from_proto(cls, pb):
"""Convert proto to enum."""
if pb == dataset_options_pb2.AutoShardPolicy.OFF:
return cls.OFF
if pb == dataset_options_pb2.AutoShardPolicy.FILE:
return cls.FILE
if pb == dataset_options_pb2.AutoShardPolicy.DATA:
return cls.DATA
if pb == dataset_options_pb2.AutoShardPolicy.AUTO:
return cls.AUTO
if pb == dataset_options_pb2.AutoShardPolicy.HINT:
return cls.HINT
raise ValueError(
f"Invalid `pb.` Supported values include `OFF`, `FILE`, `DATA`,"
f"`AUTO`, and `HINT`. Got {pb}."
)
@tf_export("data.experimental.ExternalStatePolicy")
| AutoShardPolicy |
python | pytorch__pytorch | torch/nn/modules/dropout.py | {
"start": 2173,
"end": 3847
} | class ____(_DropoutNd):
r"""Randomly zero out entire channels.
A channel is a 1D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 1D tensor :math:`\text{input}[i, j]`.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv1d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout1d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zero-ed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, L)` or :math:`(C, L)`.
- Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input).
Examples::
>>> m = nn.Dropout1d(p=0.2)
>>> input = torch.randn(20, 16, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.dropout1d(input, self.p, self.training, self.inplace)
| Dropout1d |
python | kamyu104__LeetCode-Solutions | Python/stone-game-vii.py | {
"start": 31,
"end": 606
} | class ____(object):
def stoneGameVII(self, stones):
"""
:type stones: List[int]
:rtype: int
"""
def score(i, j):
return prefix[j+1]-prefix[i]
prefix = [0]
for stone in stones:
prefix.append(prefix[-1]+stone)
dp = [[0 for _ in xrange(len(stones))] for _ in xrange(2)]
for i in reversed(xrange(len(stones))):
for j in xrange(i+1, len(stones)):
dp[i%2][j] = max(score(i+1, j)-dp[(i+1)%2][j], score(i, j-1)-dp[i%2][j-1])
return dp[0][-1]
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/core/test_runner.py | {
"start": 32793,
"end": 38622
} | class ____:
"""Test task creation functionality."""
def test_call_task_with_enable_assets_true_creates_materializing_task(
self, mock_task_state, mock_manifest_node, mock_manifest
):
"""Test that materializing tasks are created when assets are enabled."""
runner = PrefectDbtRunner(manifest=mock_manifest)
context = {"test": "context"}
with patch(
"prefect_dbt.core.runner.MaterializingTask"
) as mock_materializing_task:
mock_task = Mock(spec=MaterializingTask)
mock_materializing_task.return_value = mock_task
runner._call_task(
mock_task_state, mock_manifest_node, context, enable_assets=True
)
mock_materializing_task.assert_called_once()
def test_call_task_with_enable_assets_false_creates_regular_task(
self, mock_task_state, mock_manifest_node, mock_manifest
):
"""Test that regular tasks are created when assets are disabled."""
runner = PrefectDbtRunner(manifest=mock_manifest)
context = {"test": "context"}
with patch("prefect_dbt.core.runner.Task") as mock_task_class:
mock_task = Mock(spec=Task)
mock_task_class.return_value = mock_task
runner._call_task(
mock_task_state, mock_manifest_node, context, enable_assets=False
)
mock_task_class.assert_called_once()
def test_call_task_handles_missing_adapter_type(
self, mock_task_state, mock_manifest_node, mock_manifest
):
"""Test that missing adapter type is handled gracefully."""
runner = PrefectDbtRunner(manifest=mock_manifest)
context = {"test": "context"}
# Remove adapter_type from manifest metadata
mock_manifest.metadata.adapter_type = None
with patch("prefect_dbt.core.runner.Task") as mock_task_class:
mock_task = Mock(spec=Task)
mock_task_class.return_value = mock_task
with pytest.raises(ValueError, match="Adapter type not found in manifest"):
runner._call_task(
mock_task_state, mock_manifest_node, context, enable_assets=True
)
def test_call_task_handles_missing_relation_name_for_assets(
self, mock_task_state, mock_manifest_node, mock_manifest
):
"""Test that missing relation_name is handled when creating assets."""
runner = PrefectDbtRunner(manifest=mock_manifest)
context = {"test": "context"}
# Remove relation_name from manifest node
mock_manifest_node.relation_name = None
with patch("prefect_dbt.core.runner.Task") as mock_task_class:
mock_task = Mock(spec=Task)
mock_task_class.return_value = mock_task
with pytest.raises(ValueError, match="Relation name not found in manifest"):
runner._call_task(
mock_task_state, mock_manifest_node, context, enable_assets=True
)
def test_call_task_with_source_definition_upstream_nodes(
self, mock_task_state, mock_manifest_node, mock_manifest, mock_source_definition
):
"""Test that tasks are created correctly with source definition upstream nodes."""
runner = PrefectDbtRunner(manifest=mock_manifest)
context = {"test": "context"}
# Set up the manifest with a source definition as upstream
mock_manifest.sources = {
"source.test_project.test_source": mock_source_definition
}
mock_manifest_node.depends_on_nodes = ["source.test_project.test_source"]
with patch(
"prefect_dbt.core.runner.MaterializingTask"
) as mock_materializing_task:
mock_task = Mock(spec=MaterializingTask)
mock_materializing_task.return_value = mock_task
runner._call_task(
mock_task_state, mock_manifest_node, context, enable_assets=True
)
mock_materializing_task.assert_called_once()
def test_call_task_with_mixed_upstream_nodes_including_sources(
self, mock_task_state, mock_manifest_node, mock_manifest, mock_source_definition
):
"""Test that tasks are created correctly with mixed upstream nodes including sources."""
runner = PrefectDbtRunner(manifest=mock_manifest)
context = {"test": "context"}
# Create an upstream model node
upstream_node = Mock(spec=ManifestNode)
upstream_node.unique_id = "model.test_project.upstream_model"
upstream_node.config = Mock()
upstream_node.config.meta = {"prefect": {"enable_assets": True}}
upstream_node.relation_name = "upstream_model"
upstream_node.resource_type = NodeType.Model
upstream_node.depends_on_nodes = []
upstream_node.name = "upstream_model"
upstream_node.description = "Upstream model description"
# Set up the manifest with both a model and a source as upstream
mock_manifest.nodes = {"model.test_project.upstream_model": upstream_node}
mock_manifest.sources = {
"source.test_project.test_source": mock_source_definition
}
mock_manifest_node.depends_on_nodes = [
"model.test_project.upstream_model",
"source.test_project.test_source",
]
with patch(
"prefect_dbt.core.runner.MaterializingTask"
) as mock_materializing_task:
mock_task = Mock(spec=MaterializingTask)
mock_materializing_task.return_value = mock_task
runner._call_task(
mock_task_state, mock_manifest_node, context, enable_assets=True
)
mock_materializing_task.assert_called_once()
| TestPrefectDbtRunnerTaskCreation |
python | mlflow__mlflow | dev/clint/src/clint/rules/unknown_mlflow_arguments.py | {
"start": 36,
"end": 518
} | class ____(Rule):
def __init__(self, function_name: str, unknown_args: set[str]) -> None:
self.function_name = function_name
self.unknown_args = unknown_args
def _message(self) -> str:
args_str = ", ".join(f"`{arg}`" for arg in sorted(self.unknown_args))
return (
f"Unknown arguments {args_str} passed to `{self.function_name}`. "
"Check the function signature for valid parameter names."
)
| UnknownMlflowArguments |
python | streamlit__streamlit | lib/tests/streamlit/elements/html_test.py | {
"start": 864,
"end": 10805
} | class ____(DeltaGeneratorTestCase):
"""Test st.html API."""
def test_unsafe_allow_javascript_default_false(self):
"""By default JS execution is disabled (flag False)."""
st.html("<div>Hi</div>")
el = self.get_delta_from_queue().new_element
assert el.html.body == "<div>Hi</div>"
assert el.html.unsafe_allow_javascript is False
def test_unsafe_allow_javascript_true(self):
"""When enabled, the flag is serialized as True."""
st.html("<div>Hi</div>", unsafe_allow_javascript=True)
el = self.get_delta_from_queue().new_element
assert el.html.body == "<div>Hi</div>"
assert el.html.unsafe_allow_javascript is True
def test_unsafe_allow_javascript_style_only_ignores_flag(self):
"""Style-only HTML ignores the JS flag since no scripts can execute."""
css = "<style>body{background:red}</style>"
st.html(css, unsafe_allow_javascript=True)
# First message routes the style-only tag to the event container; then
# the element
_ = self.get_message_from_queue()
style_el = self.get_delta_from_queue().new_element
assert style_el.html.body == css
assert style_el.html.unsafe_allow_javascript is False
def test_st_html(self):
"""Test st.html."""
st.html("<i> This is a i tag </i>")
el = self.get_delta_from_queue().new_element
assert el.html.body == "<i> This is a i tag </i>"
def test_st_html_empty_body_throws_error(self):
"""Test st.html with empty body throws error."""
with pytest.raises(StreamlitAPIException) as ctx:
st.html("")
assert "`st.html` body cannot be empty" in str(ctx.value)
def test_st_html_with_style_tag_only(self):
"""Test st.html with only a style tag."""
st.html("<style>.stHeading h3 { color: purple; }</style>")
# The style tag should be enqueued to the event delta generator
style_msg = self.get_message_from_queue()
assert style_msg.metadata.delta_path == [2, 0]
# Check that html body is the expected style tag
style_el = self.get_delta_from_queue().new_element
assert style_el.html.body == "<style>.stHeading h3 { color: purple; }</style>"
def test_st_html_with_style_tag_only_case_insensitive(self):
"""Test st.html with only a style tag (case insensitive)."""
st.html("<STYLE>.stHeading h3 { color: purple; }</STYLE>")
# The style tag should be enqueued to the event delta generator
style_msg = self.get_message_from_queue()
assert style_msg.metadata.delta_path == [2, 0]
# Check that html body is the expected STYLE tag
style_el = self.get_delta_from_queue().new_element
assert style_el.html.body == "<STYLE>.stHeading h3 { color: purple; }</STYLE>"
def test_st_html_with_comments(self):
"""Test st.html with comments."""
# Check comment at start of string
st.html("<!-- HTML Comment --> <style>.stMarkdown h4 { color: blue; }</style>")
# The style tag should be enqueued to the event delta generator (comment & its location don't matter)
style_msg = self.get_message_from_queue()
assert style_msg.metadata.delta_path == [2, 0]
style_el = self.get_delta_from_queue().new_element
assert (
style_el.html.body
== "<!-- HTML Comment --> <style>.stMarkdown h4 { color: blue; }</style>"
)
# Check comment at end of string
st.html("<style>.stMarkdown h4 { color: blue; }</style> <!-- HTML Comment -->")
style_msg = self.get_message_from_queue()
assert style_msg.metadata.delta_path == [2, 1]
style_el = self.get_delta_from_queue().new_element
assert (
style_el.html.body
== "<style>.stMarkdown h4 { color: blue; }</style> <!-- HTML Comment -->"
)
def test_st_html_with_style_and_other_tags(self):
"""Test st.html with style and other tags."""
st.html("<style>.stHeading h3 { color: purple; }</style><h1>Hello, World!</h1>")
# Since there's a mix of style and other tags, html is enqueued to the main delta generator
msg = self.get_message_from_queue()
assert msg.metadata.delta_path == [0, 0]
el = self.get_delta_from_queue().new_element
assert (
el.html.body
== "<style>.stHeading h3 { color: purple; }</style><h1>Hello, World!</h1>"
)
def test_st_html_with_css_file(self):
"""Test st.html with CSS file."""
st.html(pathlib.Path(__file__).parent / "test_html.css")
el = self.get_delta_from_queue().new_element
# Check that the CSS file contents are wrapped in a style tag
assert (
el.html.body
== "<style>h1 {\n color: red;\n}\n\nh2 {\n color: blue;\n}\n</style>"
)
def test_st_html_with_file(self):
"""Test st.html with file."""
st.html(str(pathlib.Path(__file__).parent / "test_html.js"))
el = self.get_delta_from_queue().new_element
assert el.html.body.strip() == "<button>Corgi</button>"
def test_st_html_with_path(self):
"""Test st.html with path."""
st.html(pathlib.Path(__file__).parent / "test_html.js")
el = self.get_delta_from_queue().new_element
assert el.html.body.strip() == "<button>Corgi</button>"
def test_st_html_with_dunderstr(self):
"""Test st.html with __str__."""
class MyClass:
def __str__(self):
return "mystr"
obj = MyClass()
st.html(obj)
el = self.get_delta_from_queue().new_element
assert el.html.body == "mystr"
def test_st_html_with_repr_html(self):
"""Test st.html with _repr_html_."""
class MyClass:
def _repr_html_(self):
return "<div>html</div>"
obj = MyClass()
st.html(obj)
el = self.get_delta_from_queue().new_element
assert el.html.body == "<div>html</div>"
def test_st_html_with_repr_html_and_dunderstr(self):
"""Test st.html with _repr_html_ and dunderstr: html should win."""
class MyClass:
def __str__(self):
return "mystr"
def _repr_html_(self):
return "<div>html</div>"
obj = MyClass()
st.html(obj)
el = self.get_delta_from_queue().new_element
assert el.html.body == "<div>html</div>"
def test_st_html_with_width(self):
"""Test st.html with different width types."""
test_cases = [
(500, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 500),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
]
for width_value, expected_width_spec, field_name, field_value in test_cases:
with self.subTest(width_value=width_value):
st.html("<p>test html</p>", width=width_value)
el = self.get_delta_from_queue().new_element
assert el.html.body == "<p>test html</p>"
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, field_name) == field_value
def test_st_html_with_invalid_width(self):
"""Test st.html with invalid width values."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-100,
"Invalid width value: -100. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid width value: 100.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
st.html("<p>test html</p>", width=width_value)
assert str(exc.value) == expected_error_message
def test_st_html_default_width(self):
"""Test that st.html defaults to stretch width."""
st.html("<p>test html</p>")
el = self.get_delta_from_queue().new_element
assert el.html.body == "<p>test html</p>"
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_st_html_style_only_no_width_config(self):
"""Test that st.html with only style tags doesn't apply width configuration."""
st.html("<style>.test { color: red; }</style>", width=300)
# The style tag should be enqueued to the event delta generator
style_msg = self.get_message_from_queue()
assert style_msg.metadata.delta_path == [2, 0]
# Check that html body is the expected style tag
style_el = self.get_delta_from_queue().new_element
assert style_el.html.body == "<style>.test { color: red; }</style>"
# Verify that no width configuration is applied for style-only HTML
assert not style_el.HasField("width_config")
def test_st_html_with_nonhtml_filelike_str(self):
"""Test st.html with a string that's neither HTML-like nor a real file."""
st.html("foo/fake.html")
el = self.get_delta_from_queue().new_element
assert el.html.body == "foo/fake.html"
| StHtmlAPITest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 5008,
"end": 5130
} | class ____(type):
def m(cls: MetaType) -> MetaType:
return cls
from __future__ import annotations
| MetaTestClass |
python | pytorch__pytorch | test/inductor/test_inductor_freezing.py | {
"start": 5483,
"end": 33758
} | class ____(TestCase):
def test_mutation(self):
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mutated_param = torch.nn.Parameter(torch.zeros([10, 10]))
def forward(self):
self.mutated_param.add_(10)
return self.mutated_param
with torch.no_grad():
mod = Mod().to(self.device)
out_eager = mod()
out_eager2 = mod()
mod = Mod().to(self.device)
@torch.compile
def foo(mod):
return mod()
out_comp = foo(mod)
out_comp2 = foo(mod)
self.assertEqual(out_eager, out_comp)
self.assertEqual(out_eager2, out_comp2)
def test_aliased_param_return(self):
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.aliased_param = torch.nn.Parameter(torch.zeros([10, 10]))
def forward(self):
return self.aliased_param[1:], self.aliased_param
mod = Mod().to(self.device).eval()
@torch.compile()
def foo(mod):
return mod()
with torch.no_grad():
mod_eager = mod()
self.assertEqual(foo(mod), mod_eager)
def test_autocast(self):
if self.device == "cpu":
raise unittest.SkipTest("MLKDNN Bug")
mod = torch.nn.Linear(10, 10).to(self.device).eval()
inp = torch.rand([10, 10]).to(self.device).to(torch.half)
@torch.compile()
def foo(mod, inp):
return mod(inp)
with torch.no_grad():
with torch.autocast(self.device):
out_eager = mod(inp)
out_compiled, code = run_and_get_code(foo, mod, inp)
FileCheck().check_not("@triton.jit").run(code[0])
self.assertEqual(out_eager, out_compiled)
@torch._inductor.config.patch("cpp.enable_concat_linear", True)
def test_mm_concat(self):
class MM(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.t1 = torch.nn.Parameter(torch.rand(10, 10))
self.t2 = torch.nn.Parameter(torch.rand(10, 10))
self.t3 = torch.nn.Parameter(torch.rand(10, 10))
def forward(self, x):
return x @ self.t1, x @ self.t2, x @ self.t3
class MM2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.t1 = torch.nn.Parameter(torch.rand(10, 10))
self.t2 = torch.nn.Parameter(torch.rand(10, 10))
def forward(self, x):
return x @ self.t1, x @ self.t2
class AddMM(MM):
def __init__(self) -> None:
super().__init__()
self.b1 = torch.nn.Parameter(torch.rand([10]))
self.b2 = torch.nn.Parameter(torch.rand([10]))
self.b3 = torch.nn.Parameter(torch.rand([10]))
def forward(self, x):
return [
aten.addmm(b, x, p)
for b, p in [
(self.b1, self.t1),
(self.b2, self.t2),
(self.b3, self.t3),
]
]
for mod_fn in [
lambda: MM().to(self.device),
lambda: MM2().to(self.device),
lambda: AddMM().to(self.device),
]:
mod = mod_fn()
inp = torch.rand([10, 10]).to(self.device)
@torch.compile()
def foo(mod, inp):
return mod(inp)
kernel_invoke = "kernel_cpp_0" if self.device == "cpu" else "triton.jit"
mm_invoke = "mm("
# https://github.com/pytorch/pytorch/blob/e754611d190b323e53c5d17db0dc39a96687513c/torch/_inductor/fx_passes/mkldnn_fusion.py#L1263
mkldnn_weight_pack_init = (
torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()
)
if self.device == "cpu" and mkldnn_weight_pack_init:
if torch.ops.mkldnn._is_mkldnn_acl_supported():
# for aarch64 with acl supported, use mkldnn weight prepack
# https://github.com/pytorch/pytorch/blob/e754611d190b323e53c5d17db0dc39a96687513c/torch/_inductor/fx_passes/mkldnn_fusion.py#L1176-L1184
mm_invoke = "mkldnn._linear_pointwise.default("
elif torch._C.has_mkl:
mm_invoke = "mkl_linear.default("
with torch.no_grad():
out_eager = mod(inp)
out, code = run_and_get_code(foo, mod, inp)
FileCheck().check_not(kernel_invoke).check_count(
mm_invoke, count=1, exactly=True
).run(code[0])
self.assertEqual(out_eager, out)
mod2 = mod_fn()
mod2.t1 = torch.nn.Parameter(torch.rand([10, 15], device=self.device))
mod2.t2 = torch.nn.Parameter(torch.rand([10, 20], device=self.device))
if hasattr(mod2, "b1"):
mod2.b1 = torch.nn.Parameter(torch.rand([15], device=self.device))
mod2.b2 = torch.nn.Parameter(torch.rand([20], device=self.device))
# not fused
count = 3 if hasattr(mod2, "t3") else 2
with torch.no_grad():
out_eager = mod2(inp)
out, code = run_and_get_code(foo, mod2, inp)
FileCheck().check_not(kernel_invoke).check_count(
mm_invoke, count=count, exactly=True
).run(code[0])
self.assertEqual(out_eager, out)
# With inlining of inbuilt nn modules, Dynamo traces the innards of inbuilt
# module and does not modify the eager module.
@torch._dynamo.config.patch(inline_inbuilt_nn_modules=False)
def test_error_on_eager(self):
mod = ConvBN(3, 32, kernel_size=3, stride=2).eval().to(self.device)
x = torch.rand(3, 3, 32, 32).to(self.device)
@torch.compile()
def foo(mod, x):
return mod(x)
with torch.no_grad():
foo(mod, x)
with self.assertRaisesRegex(
RuntimeError, "Trying to run Pytorch Eager Module after Dynamo Freezing"
):
mod(x)
def test_static_indices_cudagraph(self):
if self.device != "cuda":
return
mod1 = torch.nn.Sequential(
torch.nn.Linear(2, 2).to(self.device), torch.nn.Linear(2, 2).to(self.device)
)
mod2 = copy.deepcopy(mod1)
def fn(x, y, mod):
x.add_(1)
getattr(mod, "0").bias.add_(2)
getattr(mod, "1").weight.add_(3)
return mod(x) + y
x1 = torch.randn(2, 2, device=self.device)
y1 = torch.randn(2, 2, device=self.device)
x2 = x1.clone()
y2 = y1.clone()
opt_fn = torch.compile(fn, mode="reduce-overhead")
with torch.no_grad():
ref = fn(x1, y1, mod1)
res = opt_fn(x2, y2, mod2)
self.assertEqual(ref, res)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_rng_op(self):
@torch.compile()
def foo():
return torch.rand([4, 4], device=self.device) + 1
with torch.no_grad():
o1 = foo()
o2 = foo()
self.assertNotEqual(o1, o2)
def test_symint_not_folded(self):
def fn(a):
return a.cos(), torch.zeros(a.shape[0], a.shape[1])
fn_opt = torch.compile(fn, backend="inductor", dynamic=True)
inp = torch.randn(2, 4, 6).to(self.device)
torch._dynamo.mark_dynamic(inp, 0)
torch._dynamo.mark_dynamic(inp, 1)
with torch.no_grad():
self.assertEqual(fn(inp), fn_opt(inp))
inp2 = torch.randn(3, 5, 6).to(self.device)
torch._dynamo.mark_dynamic(inp2, 0)
torch._dynamo.mark_dynamic(inp2, 1)
self.assertEqual(fn(inp2), fn_opt(inp2))
@requires_gpu()
def test_conv_multiple_uses(self):
from torch import nn
class ToyModel(nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn1 = nn.BatchNorm2d(1)
self.bn1.weight.data.normal_()
def forward(self, x, y):
return self.conv1(x) + self.bn1(self.conv1(y))
model = ToyModel()
model.eval().to(GPU_TYPE)
a = torch.rand(64, 1, 32, 32).to(GPU_TYPE)
b = torch.rand(64, 1, 32, 32).to(GPU_TYPE)
output = model(a, b)
with torch.no_grad():
output2 = torch.compile(model)(a, b)
self.assertEqual(output, output2)
def test_unfolded_bn(self):
x = torch.rand([3, 32, 15, 15]).to(self.device)
mod = torch.nn.BatchNorm2d(32, eps=0.001).eval().to(self.device)
@torch.compile()
def foo(mod, x):
return mod(x) + 10
out_compiled_no_inference = foo(mod, x)
# would error if not decomposed
with torch.no_grad():
out_compiled = foo(mod, x)
self.assertEqual(out_compiled_no_inference, out_compiled)
@torch._inductor.config.patch(layout_optimization=False)
def test_folded_conv_bn(self):
for use_bias, dtype in itertools.product(
[True, False], [torch.float16, torch.bfloat16, torch.float32]
):
if self.device == "cpu" and dtype == torch.float16:
continue
if self.device == GPU_TYPE and dtype == torch.bfloat16 and not SM80OrLater:
continue
mod = (
ConvBN(3, 32, bias=use_bias, kernel_size=3, stride=2)
.eval()
.to(self.device)
.to(dtype)
)
x = torch.rand(3, 3, 32, 32).to(self.device).to(dtype)
torch._dynamo.reset()
counters.clear()
@torch.compile()
def foo(mod, x):
return mod(x)
# TODO - bias is separate kernel right now, we should only unfuse it
# from conv if it can be fused
with torch.no_grad():
out_eager = mod(x)
out_optimized_for_infernece, code = run_and_get_code(foo, mod, x)
# we unfuse the conv bias, but it should only have one constant in the kernel
if self.device == "cuda":
FileCheck().check_not(".run(").check("conv").check(".run(").check_same(
"frozen_param"
).check_not("frozen_param").check_next("return").run(code[0])
self.assertEqual(
out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2
)
self.assertEqual(counters["inductor"]["binary_folding"], 4)
@torch._inductor.config.patch(layout_optimization=False)
def test_folded_conv_bn_hardswish(self):
for use_bias, dtype in itertools.product(
[True, False], [torch.float16, torch.bfloat16, torch.float32]
):
if self.device == "cpu" and dtype == torch.float16:
continue
if self.device == GPU_TYPE and dtype == torch.bfloat16 and not SM80OrLater:
continue
mod = (
ConvBNHardswish(3, 32, bias=use_bias, kernel_size=3, stride=2)
.eval()
.to(self.device)
.to(dtype)
)
x = torch.rand(3, 3, 32, 32).to(self.device).to(dtype)
torch._dynamo.reset()
counters.clear()
@torch.compile()
def foo(mod, x):
return mod(x)
# TODO - bias is separate kernel right now, we should only unfuse it
# from conv if it can be fused
with torch.no_grad():
out_eager = mod(x)
out_optimized_for_infernece, code = run_and_get_code(foo, mod, x)
# we unfuse the conv bias, but it should only have one constant in the kernel
if self.device == "cuda":
FileCheck().check_not(".run(").check("conv").check(".run(").check_same(
"frozen_param"
).check_not("frozen_param").check_next("return").run(code[0])
self.assertEqual(
out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2
)
self.assertEqual(counters["inductor"]["binary_folding"], 4)
@torch._inductor.config.patch(layout_optimization=False)
def test_folded_conv_bn_with_module_sharing(self):
mod = (
ConvBN(32, 32, bias=True, kernel_size=3, stride=2)
.to(self.device)
.to(torch.float32)
)
# Update the default parameters of BN module
for _ in range(10):
mod(torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32))
mod.eval()
x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
def foo(mod, x):
mod(x)
return mod(x)
with torch.no_grad():
out_eager = foo(mod, x)
out_optimized_for_infernece, _ = run_and_get_code(
torch.compile(foo), mod, x
)
self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
@torch._inductor.config.patch(layout_optimization=False)
def test_folded_conv_functional_bn_with_module_sharing(self):
x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
running_mean = torch.mean(x, dim=(0, 2, 3)).to(self.device)
running_var = torch.var(x, dim=(0, 2, 3)).to(self.device)
mod = (
ConvFunctionalBN(
32,
32,
bias=True,
kernel_size=3,
stride=2,
running_mean=running_mean,
running_var=running_var,
weight=torch.ones(32).to(self.device),
bn_bias=torch.zeros(32).to(self.device),
)
.eval()
.to(self.device)
.to(torch.float32)
)
def foo(mod, x):
mod(x)
return mod(x)
with torch.no_grad():
out_eager = foo(mod, x)
out_optimized_for_infernece, _ = run_and_get_code(
torch.compile(foo), mod, x
)
self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
@torch._inductor.config.patch(layout_optimization=False)
def test_conv_bn_with_multi_bn_share_conv(self):
mod = (
ConvMultiBN(32, 32, bias=True, kernel_size=3, stride=2)
.to(self.device)
.to(torch.float32)
)
# Update the default parameters of BN module
for _ in range(10):
mod(torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32))
mod.eval()
x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
def foo(mod, x):
return mod(x)
with torch.no_grad():
out_eager = foo(mod, x)
out_optimized_for_infernece, _ = run_and_get_code(
torch.compile(foo), mod, x
)
self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
@torch._inductor.config.patch(layout_optimization=False)
def test_conv_functional_bn_with_multi_bn_share_conv(self):
x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
running_mean = torch.mean(x, dim=(0, 2, 3)).to(self.device)
running_var = torch.var(x, dim=(0, 2, 3)).to(self.device)
running_mean2 = torch.mean(x, dim=(0, 2, 3)).to(self.device)
mod = (
ConvMultiFunctionalBN(
32,
32,
bias=True,
kernel_size=3,
stride=2,
running_mean=running_mean,
running_var=running_var,
weight=torch.ones(32).to(self.device),
bn_bias=torch.zeros(32).to(self.device),
running_mean2=running_mean2,
)
.eval()
.to(self.device)
.to(torch.float32)
)
def foo(mod, x):
return mod(x)
with torch.no_grad():
out_eager = foo(mod, x)
out_optimized_for_infernece, _ = run_and_get_code(
torch.compile(foo), mod, x
)
self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
@torch._inductor.config.patch(layout_optimization=False)
def test_dont_change_dtype_folding(self):
dtype = torch.float16 if self.device == GPU_TYPE else torch.bfloat16
mod = (
torch.nn.Conv2d(3, 32, bias=None, kernel_size=3, stride=2)
.eval()
.to(self.device)
.to(dtype)
)
x = torch.rand(3, 3, 32, 32).to(self.device).to(dtype)
def foo(mod, x):
return mod(x) * torch.full([1], 2.0, device=self.device)
foo_c = torch.compile(foo)
with torch.no_grad():
out_eager = foo(mod, x)
out_compiled = foo_c(mod, x)
self.assertEqual(out_eager, out_compiled)
def test_param_deallocated(self):
# TODO: cpu path keeps an extra copy of graph around somewhere,
# memory not as important for cpu
if self.device == "cpu":
raise unittest.SkipTest("NYI CPU")
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.zeros([10, 10]))
def forward(self, x):
return (self.param + 10) + x
mod = Mod().eval().to(self.device)
inp = torch.rand([10], device=self.device)
with torch.no_grad():
eager = mod(inp)
weight_ref = weakref.ref(mod.param)
@torch.compile()
def foo(mod, inp):
return mod(inp)
with torch.no_grad():
compiled = foo(mod, inp)
self.assertEqual(eager, compiled)
self.assertTrue(weight_ref() is None)
def test_conv_with_as_strided(self):
class Model(nn.Module):
def __init__(self, groups):
super().__init__()
self.kv = torch.nn.Conv2d(
256,
384,
kernel_size=(1, 1),
stride=(1, 1),
bias=False,
groups=groups,
)
def forward(self, x):
convolution = self.kv(x)
constant_pad_nd = torch.ops.aten.constant_pad_nd.default(
convolution, [2, 2, 2, 2], 0.0
)
# as_strided inputs are depend on input's size and stide.
as_strided = torch.ops.aten.as_strided.default(
constant_pad_nd, [8, 384, 2, 20, 12], [153600, 400, 160, 1, 20]
)
as_strided_1 = torch.ops.aten.as_strided.default(
as_strided, [8, 384, 2, 2, 12, 12], [153600, 400, 160, 8, 20, 1]
)
clone = torch.ops.aten.clone.default(
as_strided_1, memory_format=torch.contiguous_format
)
return clone
@torch.compile()
def foo(mod, inp):
return mod(inp)
with torch.no_grad():
x = torch.randn(8, 256, 16, 16).to(self.device)
for groups in [1, 2]:
mod = Model(groups).to(self.device).eval()
mod_eager = mod(x)
self.assertEqual(foo(mod, x), mod_eager)
@skipIfXpu
@unittest.skipIf(IS_FBCODE, "Not yet runnable in fbcode")
@unittest.skipIf(
TEST_WITH_SLOW_GRADCHECK,
"Failing in slow gradcheck on cuda12.8, see https://github.com/pytorch/pytorch/pull/156731 for example",
)
def test_cpp_wrapper(self):
mod = ConvBN(3, 32, kernel_size=3, stride=2).eval().to(self.device)
x = torch.rand(3, 3, 32, 32).to(self.device)
@torch.compile(options={"cpp_wrapper": True})
def foo(mod, x):
return mod(x)
out_eager = mod(x)
with torch.no_grad():
self.assertEqual(foo(mod, x), out_eager)
self.assertEqual(foo(mod, x), out_eager)
@tf32_on_and_off(0.001)
def test_conv_layout_convert_with_view(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(
3, 128, kernel_size=3, padding=1, stride=1, bias=False
)
self.bn = nn.BatchNorm2d(3)
def forward(self, x):
x = self.bn(x)
x = self.conv(x)
return torch.flatten(x, 1)
mod = Model().to(self.device).eval()
@torch.compile()
def foo(mod, inp):
return mod(inp)
with torch.no_grad():
x = torch.rand(2, 3, 5, 5).to(self.device)
mod_eager = mod(x)
self.assertEqual(foo(mod, x), mod_eager)
@skipIfRocm
def test_conv_weight_layout_convert(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(
3, 128, kernel_size=3, padding=1, stride=1, bias=False
)
def forward(self, x):
return self.conv(x)
@staticmethod
def get_example_inputs():
return (torch.rand(2, 3, 5, 5).to(self.device),)
from torch._inductor.compile_fx import compile_fx, compile_fx_inner
nconv = 0
def my_inner_compile(gm, example_inputs, *args, **kwargs):
out = compile_fx_inner(gm, example_inputs, *args, **kwargs)
nonlocal nconv
convs = [n for n in gm.graph.nodes if n.target == aten.convolution.default]
nconv += len(convs)
for conv in convs:
weight_node = conv.args[1]
weight_const_tensor = getattr(gm, weight_node.target)
self.assertTrue(
weight_const_tensor.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(
weight_node.meta["val"].is_contiguous(
memory_format=torch.channels_last
)
)
return out
mod = torch.compile(
Model().eval().to(self.device),
backend=functools.partial(compile_fx, inner_compile=my_inner_compile),
)
inp = mod.get_example_inputs()
with torch.no_grad():
mod(*inp)
# Only check the assertion for CUDA.
# For CPU, we may get torch.ops.mkldnn._convolution_pointwise.default
# in the joint graph rather than torch.ops.aten.convolution.default.
# Currently we only handle aten.convolution.default in layout
# optimization. That's why the count may be 0 here for CPU.
if self.device == "cuda":
self.assertTrue(nconv == 1)
def test_unequal_bias_horizontal_addmm_fusion(self):
device = self.device
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w1 = torch.tensor(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], device=device
)
self.b1 = torch.zeros(3, device=device)
self.w2 = torch.tensor(
[[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0]], device=device
)
self.b2 = torch.tensor([[-1.0, -1.0, -1.0]], device=device)
self.w3 = torch.tensor(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], device=device
)
self.b3 = torch.tensor([1.0, 2.0, 3.0], device=device)
def forward(self, x):
out1 = torch.nn.functional.linear(x, self.w1, self.b1)
out2 = torch.nn.functional.linear(x, self.w2, self.b2)
out3 = torch.nn.functional.linear(x, self.w3, self.b3)
return (out1, out2, out3)
func = Model().to(device).eval()
x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], device=device)
with torch.no_grad():
out_eager = func(x.clone())
func1 = torch.compile(func)
out_compiled = func1(x.clone())
self.assertEqual(out_eager, out_compiled)
@skipIfRocm
@tf32_on_and_off(0.001)
def test_redundant_clone_for_layout_convert(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(
3, 128, kernel_size=3, padding=1, stride=1, bias=False
)
def forward(self, x):
y = x + 1
return self.conv(x), y
@staticmethod
def get_example_inputs():
return (torch.rand(2, 3, 5, 5).to(self.device),)
mod = Model().eval().to(self.device)
inp = mod.get_example_inputs()
with torch.no_grad():
expected_outputs = mod(*inp)
num_same_stride = 0
num_diff_stride = 0
def debug_inductor_force_stride_order(orig_fn, input_tensor, stride):
nonlocal num_same_stride, num_diff_stride
input_tensor.realize()
if tuple(input_tensor.get_stride()) == tuple(stride):
num_same_stride += 1
else:
num_diff_stride += 1
return orig_fn(input_tensor, stride)
with override_lowering(
prims.inductor_force_stride_order.default, debug_inductor_force_stride_order
):
opt_mod = torch.compile(mod)
with torch.no_grad():
actual_outputs = opt_mod(*inp)
self.assertEqual(len(actual_outputs), len(expected_outputs))
self.assertEqual(2, len(actual_outputs))
for actual, expected in zip(actual_outputs, expected_outputs):
self.assertEqual(expected, actual)
if self.device == "cpu":
# CPU use different convolution implementation, skip the checks below
return
self.assertTrue(
actual_outputs[0].is_contiguous(memory_format=torch.contiguous_format)
)
self.assertTrue(
actual_outputs[1].is_contiguous(memory_format=torch.contiguous_format)
)
# we don't change the stride of y returned by forward. So there will
# be no extra copy
self.assertTrue(num_same_stride == 1, f"num_same_stride is {num_same_stride}")
# we changed the stride of self.conv(x) returned by forward. So there
# may be an extra copy
self.assertTrue(num_diff_stride == 1, f"num_diff_stride is {num_diff_stride}")
if TEST_WITH_ROCM:
torch._inductor.config.force_layout_optimization = 1
os.environ["PYTORCH_MIOPEN_SUGGEST_NHWC"] = "1"
if HAS_CPU and not torch.backends.mps.is_available():
class FreezingCpuTests(TestCase):
common = check_model
device = "cpu"
autocast = torch.cpu.amp.autocast
copy_tests(OptimizeForInferenceTemplate, FreezingCpuTests, "cpu")
if HAS_GPU:
class FreezingGpuTests(TestCase):
common = check_model_gpu
device = GPU_TYPE
copy_tests(OptimizeForInferenceTemplate, FreezingGpuTests, GPU_TYPE)
del OptimizeForInferenceTemplate
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_CPU or HAS_GPU:
run_tests(needs="filelock")
| OptimizeForInferenceTemplate |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataproc.py | {
"start": 57386,
"end": 66237
} | class ____(GoogleCloudBaseOperator):
"""
Base class for operators that launch job on DataProc.
:param region: The specified region where the dataproc cluster is created.
:param job_name: The job name used in the DataProc cluster. This name by default
is the task_id appended with the execution data, but can be templated. The
name will always be appended with a random number to avoid name clashes.
:param cluster_name: The name of the DataProc cluster.
:param project_id: The ID of the Google Cloud project the cluster belongs to,
if not specified the project will be inferred from the provided GCP connection.
:param dataproc_properties: Map for the Hive properties. Ideal to put in
default arguments (templated)
:param dataproc_jars: HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop
MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param labels: The labels to associate with this job. Label keys must contain 1 to 63 characters,
and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be associated with a job.
:param job_error_states: Job states that should be considered error states.
Any states in this set will result in an error being raised and failure of the
task. Eg, if the ``CANCELLED`` state should also be considered a task failure,
pass in ``{'ERROR', 'CANCELLED'}``. Possible values are currently only
``'ERROR'`` and ``'CANCELLED'``, but could change in the future. Defaults to
``{'ERROR'}``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after submitting the job to the Dataproc API.
This is useful for submitting long running jobs and
waiting on them asynchronously using the DataprocJobSensor
:param deferrable: Run operator in the deferrable mode
:param polling_interval_seconds: time in seconds between polling for job completion.
The value is considered only when running in deferrable mode. Must be greater than 0.
:var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
This is useful for identifying or linking to the job in the Google Cloud Console
Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
an 8 character random string.
:vartype dataproc_job_id: str
"""
job_type = ""
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
region: str,
job_name: str = "{{task.task_id}}_{{ds_nodash}}",
cluster_name: str = "cluster-1",
project_id: str = PROVIDE_PROJECT_ID,
dataproc_properties: dict | None = None,
dataproc_jars: list[str] | None = None,
gcp_conn_id: str = "google_cloud_default",
labels: dict | None = None,
job_error_states: set[str] | None = None,
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.gcp_conn_id = gcp_conn_id
self.labels = labels
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_properties
self.dataproc_jars = dataproc_jars
self.region = region
self.job_error_states = job_error_states or {"ERROR"}
self.impersonation_chain = impersonation_chain
self.hook = DataprocHook(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain)
self.project_id = project_id or self.hook.project_id
self.job_template: DataProcJobBuilder | None = None
self.job: dict | None = None
self.dataproc_job_id = None
self.asynchronous = asynchronous
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def create_job_template(self) -> DataProcJobBuilder:
"""Initialize `self.job_template` with default values."""
if self.project_id is None:
raise AirflowException(
"project id should either be set via project_id parameter or retrieved from the connection,"
)
job_template = DataProcJobBuilder(
project_id=self.project_id,
task_id=self.task_id,
cluster_name=self.cluster_name,
job_type=self.job_type,
properties=self.dataproc_properties,
)
job_template.set_job_name(self.job_name)
job_template.add_jar_file_uris(self.dataproc_jars)
job_template.add_labels(self.labels)
self.job_template = job_template
return job_template
def _generate_job_template(self) -> str:
if self.job_template:
job = self.job_template.build()
return job["job"]
raise AirflowException("Create a job template before")
def execute(self, context: Context):
if self.job_template:
self.job = self.job_template.build()
if self.job is None:
raise AirflowException("The job should be set here.")
self.dataproc_job_id = self.job["job"]["reference"]["job_id"]
self.log.info("Submitting %s job %s", self.job_type, self.dataproc_job_id)
job_object = self.hook.submit_job(
project_id=self.project_id, job=self.job["job"], region=self.region
)
job_id = job_object.reference.job_id
self.log.info("Job %s submitted successfully.", job_id)
# Save data required for extra links no matter what the job status will be
DataprocLink.persist(
context=context,
url=DATAPROC_JOB_LINK_DEPRECATED,
resource=job_id,
region=self.region,
project_id=self.project_id,
)
if self.deferrable:
self.defer(
trigger=DataprocSubmitTrigger(
job_id=job_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
if not self.asynchronous:
self.log.info("Waiting for job %s to complete", job_id)
self.hook.wait_for_job(job_id=job_id, region=self.region, project_id=self.project_id)
self.log.info("Job %s completed successfully.", job_id)
return job_id
raise AirflowException("Create a job template before")
def execute_complete(self, context, event=None) -> None:
"""
Act as a callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
job_state = event["job_state"]
job_id = event["job_id"]
if job_state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job_id}")
if job_state == JobStatus.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job_id}")
self.log.info("%s completed successfully.", self.task_id)
return job_id
def on_kill(self) -> None:
"""Act as a callback called when the operator is killed; cancel any running job."""
if self.dataproc_job_id:
self.hook.cancel_job(project_id=self.project_id, job_id=self.dataproc_job_id, region=self.region)
| DataprocJobBaseOperator |
python | walkccc__LeetCode | solutions/2609. Find the Longest Balanced Substring of a Binary String/2609-2.py | {
"start": 0,
"end": 328
} | class ____:
def findTheLongestBalancedSubstring(self, s: str) -> int:
ans = 0
zeros = 0
ones = 0
for c in s:
if c == '0':
zeros = 1 if ones > 0 else zeros + 1
ones = 0
else: # c == '1'
ones += 1
if zeros >= ones:
ans = max(ans, ones)
return ans * 2
| Solution |
python | django__django | tests/forms_tests/widget_tests/test_selectdatewidget.py | {
"start": 287,
"end": 31062
} | class ____(WidgetTest):
maxDiff = None
widget = SelectDateWidget(
years=(
"2007",
"2008",
"2009",
"2010",
"2011",
"2012",
"2013",
"2014",
"2015",
"2016",
),
)
def test_render_empty(self):
self.check_html(
self.widget,
"mydate",
"",
html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option selected value="">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option selected value="">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option selected value="">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
),
)
def test_render_none(self):
"""
Rendering the None or '' values should yield the same output.
"""
self.assertHTMLEqual(
self.widget.render("mydate", None),
self.widget.render("mydate", ""),
)
def test_render_string(self):
self.check_html(
self.widget,
"mydate",
"2010-04-15",
html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected>April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected>15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected>2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
),
)
def test_render_datetime(self):
self.assertHTMLEqual(
self.widget.render("mydate", date(2010, 4, 15)),
self.widget.render("mydate", "2010-04-15"),
)
def test_render_invalid_date(self):
"""
Invalid dates should still render the failed date.
"""
self.check_html(
self.widget,
"mydate",
"2010-02-31",
html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option value="">---</option>
<option value="1">January</option>
<option value="2" selected>February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected>31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected>2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
"""
),
)
def test_custom_months(self):
widget = SelectDateWidget(months=MONTHS_AP, years=("2013",))
self.check_html(
widget,
"mydate",
"",
html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option selected value="">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option selected value="">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option selected value="">---</option>
<option value="2013">2013</option>
</select>
"""
),
)
def test_selectdate_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields["mydate"].widget.is_required)
self.assertTrue(GetRequiredDate().fields["mydate"].widget.is_required)
def test_selectdate_empty_label(self):
w = SelectDateWidget(years=("2014",), empty_label="empty_label")
# Rendering the default state with empty_label set as string.
self.assertInHTML(
'<option selected value="">empty_label</option>',
w.render("mydate", ""),
count=3,
)
w = SelectDateWidget(
years=("2014",), empty_label=("empty_year", "empty_month", "empty_day")
)
# Rendering the default state with empty_label tuple.
self.assertHTMLEqual(
w.render("mydate", ""),
"""
<select name="mydate_month" id="id_mydate_month">
<option selected value="">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option selected value="">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option selected value="">empty_year</option>
<option value="2014">2014</option>
</select>
""",
)
with self.assertRaisesMessage(
ValueError, "empty_label list/tuple must have 3 elements."
):
SelectDateWidget(years=("2014",), empty_label=("not enough", "values"))
@translation.override("nl")
def test_l10n(self):
w = SelectDateWidget(
years=(
"2007",
"2008",
"2009",
"2010",
"2011",
"2012",
"2013",
"2014",
"2015",
"2016",
)
)
self.assertEqual(
w.value_from_datadict(
{"date_year": "2010", "date_month": "8", "date_day": "13"}, {}, "date"
),
"13-08-2010",
)
self.assertHTMLEqual(
w.render("date", "13-08-2010"),
"""
<select name="date_day" id="id_date_day">
<option value="">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected>13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected>augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected>2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Even with an invalid date, the widget should reflect the entered
# value.
self.assertEqual(w.render("mydate", "2010-02-30").count("selected"), 3)
# Years before 1900 should work.
w = SelectDateWidget(years=("1899",))
self.assertEqual(
w.value_from_datadict(
{"date_year": "1899", "date_month": "8", "date_day": "13"}, {}, "date"
),
"13-08-1899",
)
# And years before 1000 (demonstrating the need for
# sanitize_strftime_format).
w = SelectDateWidget(years=("0001",))
self.assertEqual(
w.value_from_datadict(
{"date_year": "0001", "date_month": "8", "date_day": "13"}, {}, "date"
),
"13-08-0001",
)
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y"])
def test_custom_input_format(self):
w = SelectDateWidget(years=("0001", "1899", "2009", "2010"))
with translation.override(None):
for values, expected_value in (
(("0001", "8", "13"), "13.08.0001"),
(("1899", "7", "11"), "11.07.1899"),
(("2009", "3", "7"), "07.03.2009"),
):
with self.subTest(values=values):
data = {
"field_%s" % field: value
for field, value in zip(("year", "month", "day"), values)
}
self.assertEqual(
w.value_from_datadict(data, {}, "field"), expected_value
)
expected_dict = {
field: int(value)
for field, value in zip(("year", "month", "day"), values)
}
self.assertEqual(w.format_value(expected_value), expected_dict)
def test_format_value(self):
valid_formats = [
"2000-1-1",
"2000-10-15",
"2000-01-01",
"2000-01-0",
"2000-0-01",
"2000-0-0",
"0-01-01",
"0-01-0",
"0-0-01",
"0-0-0",
]
for value in valid_formats:
year, month, day = (int(x) or "" for x in value.split("-"))
with self.subTest(value=value):
self.assertEqual(
self.widget.format_value(value),
{"day": day, "month": month, "year": year},
)
invalid_formats = [
"2000-01-001",
"2000-001-01",
"2-01-01",
"20-01-01",
"200-01-01",
"20000-01-01",
]
for value in invalid_formats:
with self.subTest(value=value):
self.assertEqual(
self.widget.format_value(value),
{"day": None, "month": None, "year": None},
)
def test_value_from_datadict(self):
tests = [
(("2000", "12", "1"), "2000-12-01"),
(("", "12", "1"), "0-12-1"),
(("2000", "", "1"), "2000-0-1"),
(("2000", "12", ""), "2000-12-0"),
(("", "", "", ""), None),
((None, "12", "1"), None),
(("2000", None, "1"), None),
(("2000", "12", None), None),
(
(str(sys.maxsize + 1), "12", "1"),
# PyPy does not raise OverflowError.
f"{sys.maxsize + 1}-12-1" if PYPY else "0-0-0",
),
]
for values, expected in tests:
with self.subTest(values=values):
data = {}
for field_name, value in zip(("year", "month", "day"), values):
if value is not None:
data["field_%s" % field_name] = value
self.assertEqual(
self.widget.value_from_datadict(data, {}, "field"), expected
)
def test_value_omitted_from_data(self):
self.assertIs(self.widget.value_omitted_from_data({}, {}, "field"), True)
self.assertIs(
self.widget.value_omitted_from_data({"field_month": "12"}, {}, "field"),
False,
)
self.assertIs(
self.widget.value_omitted_from_data({"field_year": "2000"}, {}, "field"),
False,
)
self.assertIs(
self.widget.value_omitted_from_data({"field_day": "1"}, {}, "field"), False
)
data = {"field_day": "1", "field_month": "12", "field_year": "2000"}
self.assertIs(self.widget.value_omitted_from_data(data, {}, "field"), False)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_years_rendered_without_separator(self):
widget = SelectDateWidget(years=(2007,))
self.check_html(
widget,
"mydate",
"",
html=(
"""
<select name="mydate_month" id="id_mydate_month">
<option selected value="">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option selected value="">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option selected value="">---</option>
<option value="2007">2007</option>
</select>
"""
),
)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = DateField(widget=self.widget)
form = TestForm()
self.assertIs(self.widget.use_fieldset, True)
self.assertHTMLEqual(
"<div><fieldset><legend>Field:</legend>"
'<select name="field_month" required id="id_field_month">'
'<option value="1">January</option><option value="2">February</option>'
'<option value="3">March</option><option value="4">April</option>'
'<option value="5">May</option><option value="6">June</option>'
'<option value="7">July</option><option value="8">August</option>'
'<option value="9">September</option><option value="10">October</option>'
'<option value="11">November</option><option value="12">December</option>'
'</select><select name="field_day" required id="id_field_day">'
'<option value="1">1</option><option value="2">2</option>'
'<option value="3">3</option><option value="4">4</option>'
'<option value="5">5</option><option value="6">6</option>'
'<option value="7">7</option><option value="8">8</option>'
'<option value="9">9</option><option value="10">10</option>'
'<option value="11">11</option><option value="12">12</option>'
'<option value="13">13</option><option value="14">14</option>'
'<option value="15">15</option><option value="16">16</option>'
'<option value="17">17</option><option value="18">18</option>'
'<option value="19">19</option><option value="20">20</option>'
'<option value="21">21</option><option value="22">22</option>'
'<option value="23">23</option><option value="24">24</option>'
'<option value="25">25</option><option value="26">26</option>'
'<option value="27">27</option><option value="28">28</option>'
'<option value="29">29</option><option value="30">30</option>'
'<option value="31">31</option></select>'
'<select name="field_year" required id="id_field_year">'
'<option value="2007">2007</option><option value="2008">2008</option>'
'<option value="2009">2009</option><option value="2010">2010</option>'
'<option value="2011">2011</option><option value="2012">2012</option>'
'<option value="2013">2013</option><option value="2014">2014</option>'
'<option value="2015">2015</option><option value="2016">2016</option>'
"</select></fieldset></div>",
form.render(),
)
| SelectDateWidgetTest |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 360,
"end": 469
} | class ____(LastDeployOptional):
id: str
environment: str
dateFinished: str
name: str
| LastDeploy |
python | jazzband__django-simple-history | simple_history/registry_tests/tests.py | {
"start": 7178,
"end": 7529
} | class ____(TransactionTestCase):
def test_makemigration_command(self):
management.call_command(
"makemigrations", "migration_test_app", stdout=StringIO()
)
def test_migrate_command(self):
management.call_command(
"migrate", "migration_test_app", fake=True, stdout=StringIO()
)
| TestMigrate |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/toys/user_computed_data_versions/external_system.py | {
"start": 674,
"end": 774
} | class ____(TypedDict):
code_version: str
input_data_versions: Mapping[str, str]
| ProvenanceSpec |
python | google__python-fire | fire/test_components.py | {
"start": 4556,
"end": 4714
} | class ____:
def reciprocal(self, divisor=10.0):
return 1.0 / divisor
def integer_reciprocal(self, divisor=10):
return 1.0 / divisor
| NumberDefaults |
python | doocs__leetcode | solution/2900-2999/2963.Count the Number of Good Partitions/Solution.py | {
"start": 0,
"end": 300
} | class ____:
def numberOfGoodPartitions(self, nums: List[int]) -> int:
last = {x: i for i, x in enumerate(nums)}
mod = 10**9 + 7
j, k = -1, 0
for i, x in enumerate(nums):
j = max(j, last[x])
k += i == j
return pow(2, k - 1, mod)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/init_ops_test.py | {
"start": 29231,
"end": 29728
} | class ____(test.TestCase):
def testNoDevice(self):
with ops.Graph().as_default():
var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual(None, var.device)
self.assertDeviceEqual(None, var.initializer.device)
def testDevice(self):
with ops.Graph().as_default():
with ops.device("/job:ps"):
var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual("/job:ps", var.device)
self.assertDeviceEqual("/job:ps", var.initializer.device)
| DeviceTest |
python | readthedocs__readthedocs.org | readthedocs/organizations/forms.py | {
"start": 5664,
"end": 5768
} | class ____(SettingsOverrideObject):
_default_class = OrganizationSignupFormBase
| OrganizationSignupForm |
python | huggingface__transformers | tests/models/blip/test_modeling_blip.py | {
"start": 1665,
"end": 4743
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=1e-10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return BlipVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = BlipVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| BlipVisionModelTester |
python | pytorch__pytorch | torch/autograd/_functions/tensor.py | {
"start": 987,
"end": 2496
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, tensor, sizes):
ctx.sizes = sizes
ctx.numel = reduce(operator.mul, sizes, 1)
if tensor.numel() != ctx.numel:
raise RuntimeError(
(
"requested resize to {} ({} elements in total), "
"but the given tensor has a size of {} ({} elements). "
"autograd's resize can only change the shape of a given "
"tensor, while preserving the number of elements. "
).format(
"x".join(map(str, sizes)),
ctx.numel,
"x".join(map(str, tensor.size())),
tensor.numel(),
)
)
ctx.input_sizes = tensor.size()
if tensor.is_quantized:
tensor.copy_(tensor)
return tensor.contiguous().view(*sizes)
if tensor.is_contiguous():
result = tensor.new(tensor).contiguous().view(*sizes)
return result
else:
return tensor.contiguous().view(*sizes)
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
if grad_output.numel() != ctx.numel:
raise AssertionError(
f"Expected grad_output to have {ctx.numel} elements, but got {grad_output.numel()}"
)
return grad_output.contiguous().view(ctx.input_sizes), None
| Resize |
python | numba__numba | numba/core/target_extension.py | {
"start": 3880,
"end": 3992
} | class ____(Generic):
"""Mark the target as GPU, i.e. suitable for compilation on a GPU
target.
"""
| GPU |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py | {
"start": 8804,
"end": 11403
} | class ____:
def test_init(self):
"""
Test init by creating AzureServiceBusReceiveMessageOperator with task id, queue_name, message,
batch and asserting with values
"""
asb_receive_queue_operator = AzureServiceBusReceiveMessageOperator(
task_id="asb_receive_message_queue",
queue_name=QUEUE_NAME,
)
assert asb_receive_queue_operator.task_id == "asb_receive_message_queue"
assert asb_receive_queue_operator.queue_name == QUEUE_NAME
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.MessageHook.get_conn")
def test_receive_message_queue(self, mock_get_conn):
"""
Test AzureServiceBusReceiveMessageOperator by mock connection, values
and the service bus receive message
"""
asb_receive_queue_operator = AzureServiceBusReceiveMessageOperator(
task_id="asb_receive_message_queue",
queue_name=QUEUE_NAME,
)
asb_receive_queue_operator.execute(None)
expected_calls = [
mock.call()
.__enter__()
.get_queue_receiver(QUEUE_NAME)
.__enter__()
.receive_messages(max_message_count=10, max_wait_time=5)
.get_queue_receiver(QUEUE_NAME)
.__exit__()
.mock_call()
.__exit__
]
mock_get_conn.assert_has_calls(expected_calls)
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.MessageHook.get_conn")
def test_receive_message_queue_callback(self, mock_get_conn):
"""
Test AzureServiceBusReceiveMessageOperator by mock connection, values
and the service bus receive message
"""
mock_service_bus_message = ServiceBusMessage("Test message with context")
mock_get_conn.return_value.__enter__.return_value.get_queue_receiver.return_value.__enter__.return_value.receive_messages.return_value = [
mock_service_bus_message
]
messages_received = []
def message_callback(msg: ServiceBusMessage, context: Context):
messages_received.append(msg)
assert context is not None
print(msg)
asb_receive_queue_operator = AzureServiceBusReceiveMessageOperator(
task_id="asb_receive_message_queue", queue_name=QUEUE_NAME, message_callback=message_callback
)
asb_receive_queue_operator.execute(Context())
assert len(messages_received) == 1
assert messages_received[0] == mock_service_bus_message
| TestAzureServiceBusReceiveMessageOperator |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 1952,
"end": 2005
} | class ____:
def f(self) -> None:
pass
| Mixin |
python | python__mypy | mypy/dmypy_util.py | {
"start": 1209,
"end": 3006
} | class ____(TextIO):
"""Helper class to write to a connection instead of standard output."""
def __init__(self, server: IPCBase, output_key: str, isatty: bool) -> None:
self.server = server
self.output_key = output_key
self._isatty = isatty
def __enter__(self) -> TextIO:
return self
def __exit__(
self,
t: type[BaseException] | None,
value: BaseException | None,
traceback: TracebackType | None,
) -> None:
pass
def __iter__(self) -> Iterator[str]:
raise io.UnsupportedOperation
def __next__(self) -> str:
raise io.UnsupportedOperation
def close(self) -> None:
pass
def fileno(self) -> int:
raise OSError
def flush(self) -> None:
pass
def isatty(self) -> bool:
return self._isatty
def read(self, n: int = 0) -> str:
raise io.UnsupportedOperation
def readable(self) -> bool:
return False
def readline(self, limit: int = 0) -> str:
raise io.UnsupportedOperation
def readlines(self, hint: int = 0) -> list[str]:
raise io.UnsupportedOperation
def seek(self, offset: int, whence: int = 0) -> int:
raise io.UnsupportedOperation
def seekable(self) -> bool:
return False
def tell(self) -> int:
raise io.UnsupportedOperation
def truncate(self, size: int | None = 0) -> int:
raise io.UnsupportedOperation
def write(self, output: str) -> int:
resp: dict[str, Any] = {self.output_key: output}
send(self.server, resp)
return len(output)
def writable(self) -> bool:
return True
def writelines(self, lines: Iterable[str]) -> None:
for s in lines:
self.write(s)
| WriteToConn |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 5679,
"end": 5728
} | class ____(MROBase2, MROBase3):
pass
| MRODerived |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 16988,
"end": 20068
} | class ____:
"""Instantiate a multivariate normal integration problem with a special singular
covariance structure.
When covariance matrix is a correlation matrix where the off-diagonal entries
``covar[i, j] == -lambdas[i]*lambdas[j]`` for ``i != j``, and
``sum(lambdas**2 / (1+lambdas**2)) == 1``, then the matrix is singular, and
the multidimensional integral reduces to a simpler univariate integral that
can be numerically integrated fairly easily.
The lower bound must be infinite, though the upper bounds can be general.
References
----------
.. [1] Kwong, K.-S. (1995). "Evaluation of the one-sided percentage points of the
singular multivariate normal distribution." Journal of Statistical
Computation and Simulation, 51(2-4), 121-135. doi:10.1080/00949659508811627
"""
ndim : int
low : np.ndarray
high : np.ndarray
lambdas : np.ndarray
covar : np.ndarray
target_val : float
target_err : float
def __init__(self, ndim, high, lambdas):
self.ndim = ndim
self.high = high
self.lambdas = lambdas
self.low = np.full(ndim, -np.inf)
self.covar = -np.outer(self.lambdas, self.lambdas)
np.fill_diagonal(self.covar, 1.0)
self.find_target()
@classmethod
def generate_semiinfinite(cls, ndim, rng=None):
"""Singular lambdas, random upper bounds.
"""
rng = np.random.default_rng(rng)
high = rng.uniform(0.0, np.sqrt(ndim), size=ndim)
p = rng.dirichlet(np.full(ndim, 1.0))
lambdas = np.sqrt(p / (1-p)) * rng.choice([-1.0, 1.0], size=ndim)
self = cls(
ndim=ndim,
high=high,
lambdas=lambdas,
)
return self
def find_target(self, **kwds):
d = dict(
a=-9.0,
b=+9.0,
)
d.update(kwds)
self.target_val, self.target_err = quad(self.univariate_func, **d)
def _univariate_term(self, t):
denom = np.sqrt(1 + self.lambdas**2)
i1 = np.prod(
special.ndtr((self.high - 1j*self.lambdas*t[:, np.newaxis]) / denom),
axis=1,
)
i2 = np.prod(
special.ndtr((-self.high + 1j*self.lambdas*t[:, np.newaxis]) / denom),
axis=1,
)
# The imaginary part is an odd function, so it can be ignored; it will integrate
# out to 0.
return (i1 - (-1)**self.ndim * i2).real
def univariate_func(self, t):
t = np.atleast_1d(t)
return (norm_pdf(t) * self._univariate_term(t)).squeeze()
def plot_integrand(self):
"""Plot the univariate integrand and its component terms for understanding.
"""
from matplotlib import pyplot as plt
t = np.linspace(-9.0, 9.0, 1001)
plt.plot(t, norm_pdf(t), label=r'$\phi(t)$')
plt.plot(t, self._univariate_term(t), label=r'$f(t)$')
plt.plot(t, self.univariate_func(t), label=r'$f(t)*phi(t)$')
plt.ylim(-0.1, 1.1)
plt.legend()
| SingularMVNProblem |
python | pandas-dev__pandas | pandas/core/arrays/string_.py | {
"start": 10937,
"end": 17576
} | class ____(ExtensionArray):
"""
Mixin class for StringArray, ArrowStringArray.
"""
dtype: StringDtype
# TODO(4.0): Once the deprecation here is enforced, this method can be
# removed and we use the parent class method instead.
def _logical_method(self, other, op):
if (
op in (roperator.ror_, roperator.rand_, roperator.rxor)
and isinstance(other, np.ndarray)
and other.dtype == bool
):
# GH#60234 backward compatibility for the move to StringDtype in 3.0
op_name = op.__name__[1:].strip("_")
warnings.warn(
f"'{op_name}' operations between boolean dtype and {self.dtype} are "
"deprecated and will raise in a future version. Explicitly "
"cast the strings to a boolean dtype before operating instead.",
Pandas4Warning,
stacklevel=find_stack_level(),
)
return op(other, self.astype(bool))
return NotImplemented
@doc(ExtensionArray.tolist)
def tolist(self) -> list:
if self.ndim > 1:
return [x.tolist() for x in self]
return list(self.to_numpy())
def _formatter(self, boxed: bool = False):
formatter = partial(
printing.pprint_thing,
escape_chars=("\t", "\r", "\n"),
quote_strings=not boxed,
)
return formatter
def _str_map(
self,
f,
na_value=lib.no_default,
dtype: Dtype | None = None,
convert: bool = True,
):
if self.dtype.na_value is np.nan:
return self._str_map_nan_semantics(f, na_value=na_value, dtype=dtype)
from pandas.arrays import BooleanArray
if dtype is None:
dtype = self.dtype
if na_value is lib.no_default:
na_value = self.dtype.na_value
mask = isna(self)
arr = np.asarray(self)
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
constructor: type[IntegerArray | BooleanArray]
if is_integer_dtype(dtype):
constructor = IntegerArray
else:
constructor = BooleanArray
na_value_is_na = isna(na_value)
if na_value_is_na:
na_value = 1
elif dtype == np.dtype("bool"):
# GH#55736
na_value = bool(na_value)
result = lib.map_infer_mask(
arr,
f,
mask.view("uint8"),
convert=False,
na_value=na_value,
# error: Argument 1 to "dtype" has incompatible type
# "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
# "Type[object]"
dtype=np.dtype(cast(type, dtype)),
)
if not na_value_is_na:
mask[:] = False
return constructor(result, mask)
else:
return self._str_map_str_or_object(dtype, na_value, arr, f, mask)
def _str_map_str_or_object(
self,
dtype,
na_value,
arr: np.ndarray,
f,
mask: npt.NDArray[np.bool_],
):
# _str_map helper for case where dtype is either string dtype or object
if is_string_dtype(dtype) and not is_object_dtype(dtype):
# i.e. StringDtype
result = lib.map_infer_mask(
arr, f, mask.view("uint8"), convert=False, na_value=na_value
)
if self.dtype.storage == "pyarrow":
import pyarrow as pa
# TODO: shouldn't this already be caught my passed mask?
# it isn't in test_extract_expand_capture_groups_index
# mask = mask | np.array(
# [x is libmissing.NA for x in result], dtype=bool
# )
result = pa.array(
result, mask=mask, type=pa.large_string(), from_pandas=True
)
# error: "BaseStringArray" has no attribute "_from_pyarrow_array"
return self._from_pyarrow_array(result) # type: ignore[attr-defined]
else:
# StringArray
# error: Too many arguments for "BaseStringArray"
return type(self)(result, dtype=self.dtype) # type: ignore[call-arg]
else:
# This is when the result type is object. We reach this when
# -> We know the result type is truly object (e.g. .encode returns bytes
# or .findall returns a list).
# -> We don't know the result type. E.g. `.get` can return anything.
return lib.map_infer_mask(arr, f, mask.view("uint8"))
def _str_map_nan_semantics(
self, f, na_value=lib.no_default, dtype: Dtype | None = None
):
if dtype is None:
dtype = self.dtype
if na_value is lib.no_default:
if is_bool_dtype(dtype):
# NaN propagates as False
na_value = False
else:
na_value = self.dtype.na_value
mask = isna(self)
arr = np.asarray(self)
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
na_value_is_na = isna(na_value)
if na_value_is_na:
if is_integer_dtype(dtype):
na_value = 0
else:
# NaN propagates as False
na_value = False
result = lib.map_infer_mask(
arr,
f,
mask.view("uint8"),
convert=False,
na_value=na_value,
dtype=np.dtype(cast(type, dtype)),
)
if na_value_is_na and is_integer_dtype(dtype) and mask.any():
# TODO: we could alternatively do this check before map_infer_mask
# and adjust the dtype/na_value we pass there. Which is more
# performant?
result = result.astype("float64")
result[mask] = np.nan
return result
else:
return self._str_map_str_or_object(dtype, na_value, arr, f, mask)
def view(self, dtype: Dtype | None = None) -> Self:
if dtype is not None:
raise TypeError("Cannot change data-type for string array.")
return super().view()
@set_module("pandas.arrays")
# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
| BaseStringArray |
python | kamyu104__LeetCode-Solutions | Python/sum-of-mutated-array-closest-to-target.py | {
"start": 33,
"end": 750
} | class ____(object):
def findBestValue(self, arr, target):
"""
:type arr: List[int]
:type target: int
:rtype: int
"""
arr.sort(reverse=True)
max_arr = arr[0]
while arr and arr[-1]*len(arr) <= target:
target -= arr.pop()
# let x = ceil(t/n)-1
# (1) (t/n-1/2) <= x:
# return x, which is equal to ceil(t/n)-1 = ceil(t/n-1/2) = (2t+n-1)//2n
# (2) (t/n-1/2) > x:
# return x+1, which is equal to ceil(t/n) = ceil(t/n-1/2) = (2t+n-1)//2n
# (1) + (2) => both return (2t+n-1)//2n
return max_arr if not arr else (2*target+len(arr)-1)//(2*len(arr))
# Time: O(nlogn)
# Space: O(1)
| Solution |
python | Netflix__metaflow | metaflow/runner/nbrun.py | {
"start": 159,
"end": 287
} | class ____(Exception):
"""Custom exception for errors during NBRunner initialization."""
pass
| NBRunnerInitializationError |
python | fastai__fastai | fastai/callback/tracker.py | {
"start": 3835,
"end": 6058
} | class ____(TrackerCallback):
"A `TrackerCallback` that saves the model's best during training and loads it at the end."
order = TrackerCallback.order+1
def __init__(self,
monitor='valid_loss', # value (usually loss or metric) being monitored.
comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
min_delta=0., # minimum delta between the last monitor value and the best monitor value.
fname='model', # model name to be used when saving model.
every_epoch=False, # if true, save model after every epoch; else save only when model is better than existing best.
at_end=False, # if true, save model when training ends; else load best model if there is only one saved model.
with_opt=False, # if true, save optimizer state (if any available) when saving model.
reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
assert not (every_epoch and at_end), "every_epoch and at_end cannot both be set to True"
# keep track of file path for loggers
self.last_saved_path = None
store_attr('fname,every_epoch,at_end,with_opt')
def _save(self, name): self.last_saved_path = self.learn.save(name, with_opt=self.with_opt)
def after_epoch(self):
"Compare the value monitored to its best score and save if best."
if self.every_epoch:
if (self.epoch%self.every_epoch) == 0: self._save(f'{self.fname}_{self.epoch}')
else: #every improvement
super().after_epoch()
if self.new_best:
print(f'Better model found at epoch {self.epoch} with {self.monitor} value: {self.best}.')
self._save(f'{self.fname}')
def after_fit(self, **kwargs):
"Load the best model."
if self.at_end: self._save(f'{self.fname}')
elif not self.every_epoch: self.learn.load(f'{self.fname}', with_opt=self.with_opt, weights_only=False)
# %% ../../nbs/17_callback.tracker.ipynb 30
| SaveModelCallback |
python | walkccc__LeetCode | solutions/3261. Count Substrings That Satisfy K-Constraint II/3261.py | {
"start": 0,
"end": 1973
} | class ____:
def countKConstraintSubstrings(
self,
s: str,
k: int,
queries: list[list[int]]
) -> list[int]:
n = len(s)
ans = []
count = [0, 0]
# leftToRight[l] := the maximum right index r s.t. s[l..r] is valid
leftToRight = [0] * n
# rightToLeft[r] := the minimum left index l s.t. s[l..r] is valid
rightToLeft = [0] * n
l = 0
for r in range(n):
count[int(s[r])] += 1
while min(count) > k:
count[int(s[l])] -= 1
l += 1
rightToLeft[r] = l
count = [0, 0]
r = n - 1
for l in reversed(range(n)):
count[int(s[l])] += 1
while min(count) > k:
count[int(s[r])] -= 1
r -= 1
leftToRight[l] = r
# prefix[i] := the number of valid substrings ending in [0..i - 1].
prefix = list(itertools.accumulate((r - l + 1
for r, l in enumerate(rightToLeft)),
initial=0))
for l, r in queries:
if r > leftToRight[l]:
# If r is beyond leftToRight[l], compute the number of valid substrings
# from l to leftToRight[l] and add the number of valid substrings
# ending in [leftToRight[l] + 1..r].
#
# prefix[r + 1] := the number of valid substrings ending in [0..r].
# prefix[leftToRight[l] + 1] := the number of valid substrings ending
# in [0..leftToRight].
# => prefix[r + 1] - prefix[leftToRight[l] + 1] := the number of valid
# substrings ending in [leftToRight[l] + 1..r].
sz = leftToRight[l] - l + 1
numValidSubstrings = sz * (sz + 1) // 2 + (
prefix[r + 1] - prefix[leftToRight[l] + 1])
else:
# If r is within the range of leftToRight[l], compute the number of
# valid substrings directly from l to r.
sz = r - l + 1
numValidSubstrings = sz * (sz + 1) // 2
ans.append(numValidSubstrings)
return ans
| Solution |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_cli_integration.py | {
"start": 2640,
"end": 3761
} | class ____:
"""
Integration version of job CLI test that ensures interaction with the
following components are working as expected:
1) Ray client: use of RAY_JOB_HEADERS and ray.init() in job_head.py
2) Ray dashboard: `ray start --head`
"""
def test_empty_ray_job_headers(self, ray_start_stop):
with set_env_var("RAY_JOB_HEADERS", None):
stdout, _ = _run_cmd("ray job submit -- echo hello")
assert "hello" in stdout
assert "succeeded" in stdout
@pytest.mark.parametrize("ray_job_headers", ['{"key": "value"}'])
def test_ray_job_headers(self, ray_start_stop, ray_job_headers: str):
with set_env_var("RAY_JOB_HEADERS", ray_job_headers):
_run_cmd("ray job submit -- echo hello", should_fail=False)
@pytest.mark.parametrize("ray_job_headers", ["{key value}"])
def test_ray_incorrectly_formatted_job_headers(
self, ray_start_stop, ray_job_headers: str
):
with set_env_var("RAY_JOB_HEADERS", ray_job_headers):
_run_cmd("ray job submit -- echo hello", should_fail=True)
| TestRayJobHeaders |
python | getsentry__sentry | tests/sentry/issues/test_priority.py | {
"start": 638,
"end": 6321
} | class ____(TestCase):
def assert_activity_grouphistory_set(
self, group: Group, priority: PriorityLevel, reason: PriorityChangeReason
) -> None:
activity = Activity.objects.filter(
group=group, type=ActivityType.SET_PRIORITY.value
).order_by("-datetime")[0]
assert activity.data == {
"priority": priority.to_str(),
"reason": reason.value,
}
grouphistory = GroupHistory.objects.filter(group=group).order_by("-date_added")[0]
assert grouphistory.status == PRIORITY_TO_GROUP_HISTORY_STATUS[priority]
def test_updates_priority_escalating(self) -> None:
self.group = self.create_group(
status=GroupStatus.IGNORED,
priority=PriorityLevel.LOW,
)
auto_update_priority(self.group, PriorityChangeReason.ESCALATING)
assert self.group.priority == PriorityLevel.MEDIUM
self.assert_activity_grouphistory_set(
self.group, PriorityLevel.MEDIUM, PriorityChangeReason.ESCALATING
)
def test_updates_priority_escalating_no_status(self) -> None:
self.group = self.create_group(
status=GroupStatus.IGNORED,
priority=None,
)
auto_update_priority(self.group, PriorityChangeReason.ESCALATING)
assert self.group.priority == PriorityLevel.HIGH
self.assert_activity_grouphistory_set(
self.group, PriorityLevel.HIGH, PriorityChangeReason.ESCALATING
)
def test_updates_priority_escalating_remains_high(self) -> None:
self.group = self.create_group(
status=GroupStatus.IGNORED,
priority=PriorityLevel.HIGH,
)
auto_update_priority(self.group, PriorityChangeReason.ESCALATING)
assert self.group.priority == PriorityLevel.HIGH
assert not Activity.objects.filter(
group=self.group, type=ActivityType.SET_PRIORITY.value
).exists()
assert not GroupHistory.objects.filter(
group=self.group, status=GroupHistoryStatus.PRIORITY_HIGH
).exists()
def test_skips_if_priority_locked(self) -> None:
self.group = self.create_group(
status=GroupStatus.IGNORED,
priority=PriorityLevel.LOW,
priority_locked_at=before_now(days=1),
)
auto_update_priority(self.group, PriorityChangeReason.ESCALATING)
assert self.group.priority == PriorityLevel.LOW
assert Activity.objects.filter(group=self.group).count() == 0
assert GroupHistory.objects.filter(group=self.group).count() == 0
def test_updates_priority_ongoing(self) -> None:
self.group = self.create_group(
status=GroupStatus.UNRESOLVED,
substatus=GroupSubStatus.NEW,
priority=PriorityLevel.LOW,
)
self.group.data.get("metadata", {})["initial_priority"] = PriorityLevel.LOW
auto_update_priority(self.group, PriorityChangeReason.ESCALATING)
auto_update_priority(self.group, PriorityChangeReason.ONGOING)
self.group.refresh_from_db()
assert self.group.priority == PriorityLevel.LOW
self.assert_activity_grouphistory_set(
self.group, PriorityLevel.LOW, PriorityChangeReason.ONGOING
)
@patch("sentry.issues.priority.logger.error")
def test_updates_priority_ongoing_no_initial_priority(self, mock_logger: MagicMock) -> None:
self.group = self.create_group(
status=GroupStatus.RESOLVED,
)
self.group.data.get("metadata", {})["initial_priority"] = None
self.group.save()
auto_update_priority(self.group, PriorityChangeReason.ONGOING)
mock_logger.assert_called_with(
"get_priority_for_ongoing_group.initial_priority_not_found",
extra={"group": self.group.id},
)
assert not self.group.priority
assert Activity.objects.filter(group=self.group).count() == 0
assert GroupHistory.objects.filter(group=self.group).count() == 0
@patch("sentry.issues.attributes.send_snapshot_values")
def test_priority_update_sends_snapshot(self, mock_send_snapshot_values: MagicMock) -> None:
self.group = self.create_group(
status=GroupStatus.UNRESOLVED,
substatus=GroupSubStatus.ONGOING,
priority=PriorityLevel.HIGH,
)
update_priority(
group=self.group,
priority=PriorityLevel.MEDIUM,
sender="test",
reason=PriorityChangeReason.ONGOING,
project=self.project,
)
assert self.group.priority == PriorityLevel.MEDIUM
mock_send_snapshot_values.assert_called_with(None, self.group, False)
def test_user_updates_priority_locked_group(self) -> None:
self.group = self.create_group(
status=GroupStatus.UNRESOLVED,
substatus=GroupSubStatus.ONGOING,
priority=PriorityLevel.HIGH,
)
handle_priority(
priority=PriorityLevel.MEDIUM.to_str(),
group_list=[self.group],
acting_user=self.user,
project_lookup={self.group.project_id: self.project},
)
assert self.group.priority == PriorityLevel.MEDIUM
assert self.group.priority_locked_at is not None
handle_priority(
priority=PriorityLevel.LOW.to_str(),
group_list=[self.group],
acting_user=self.user,
project_lookup={self.group.project_id: self.project},
)
assert self.group.priority == PriorityLevel.LOW
assert self.group.priority_locked_at is not None
| TestUpdatesPriority |
python | django__django | django/template/context.py | {
"start": 6539,
"end": 9482
} | class ____(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(
self,
request,
dict_=None,
processors=None,
use_l10n=None,
use_tz=None,
autoescape=True,
):
super().__init__(dict_, use_l10n=use_l10n, use_tz=use_tz, autoescape=autoescape)
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
# placeholder for context processors output
self.update({})
# empty dict for any new modifications
# (so that context processors don't overwrite them)
self.update({})
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = template.engine.template_context_processors + self._processors
updates = {}
for processor in processors:
context = processor(self.request)
try:
updates.update(context)
except TypeError as e:
raise TypeError(
f"Context processor {processor.__qualname__} didn't return a "
"dictionary."
) from e
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super().new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, "_processors_index"):
del new_context._processors_index
return new_context
def make_context(context, request=None, **kwargs):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if context is not None and not isinstance(context, dict):
raise TypeError(
"context must be a dict rather than %s." % context.__class__.__name__
)
if request is None:
context = Context(context, **kwargs)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request, **kwargs)
if original_context:
context.push(original_context)
return context
| RequestContext |
python | pytorch__pytorch | torch/utils/data/datapipes/utils/decoder.py | {
"start": 10100,
"end": 12123
} | class ____:
"""
Decode key/data sets using a list of handlers.
For each key/data item, this iterates through the list of
handlers until some handler returns something other than None.
"""
def __init__(self, *handler, key_fn=extension_extract_fn) -> None:
self.handlers = list(handler) if handler else []
self.key_fn = key_fn
# Insert new handler from the beginning of handlers list to make sure the new
# handler having the highest priority
def add_handler(self, *handler) -> None:
if not handler:
return
self.handlers = list(handler) + self.handlers
@staticmethod
def _is_stream_handle(data):
obj_to_check = data.file_obj if isinstance(data, StreamWrapper) else data
return isinstance(obj_to_check, (io.BufferedIOBase, io.RawIOBase))
def decode1(self, key, data):
if not data:
return data
# if data is a stream handle, we need to read all the content before decoding
if Decoder._is_stream_handle(data):
ds = data
# The behavior of .read can differ between streams (e.g. HTTPResponse), hence this is used instead
data = b"".join(data)
ds.close()
for f in self.handlers:
result = f(key, data)
if result is not None:
return result
return data
def decode(self, data):
result = {}
# single data tuple(pathname, data stream)
if isinstance(data, tuple):
data = [data]
if data is not None:
for k, v in data:
# TODO: xinyu, figure out why Nvidia do this?
if k[0] == "_":
if isinstance(v, bytes):
v = v.decode("utf-8")
result[k] = v
continue
result[k] = self.decode1(self.key_fn(k), v)
return result
def __call__(self, data):
return self.decode(data)
| Decoder |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/format_control.py | {
"start": 186,
"end": 2516
} | class ____:
"""Helper for managing formats from which a package can be installed."""
__slots__ = ["no_binary", "only_binary"]
def __init__(
self,
no_binary: Optional[Set[str]] = None,
only_binary: Optional[Set[str]] = None,
) -> None:
if no_binary is None:
no_binary = set()
if only_binary is None:
only_binary = set()
self.no_binary = no_binary
self.only_binary = only_binary
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
if self.__slots__ != other.__slots__:
return False
return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.no_binary}, {self.only_binary})"
@staticmethod
def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None:
if value.startswith("-"):
raise CommandError(
"--no-binary / --only-binary option requires 1 argument."
)
new = value.split(",")
while ":all:" in new:
other.clear()
target.clear()
target.add(":all:")
del new[: new.index(":all:") + 1]
# Without a none, we want to discard everything as :all: covers it
if ":none:" not in new:
return
for name in new:
if name == ":none:":
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]:
result = {"binary", "source"}
if canonical_name in self.only_binary:
result.discard("source")
elif canonical_name in self.no_binary:
result.discard("binary")
elif ":all:" in self.only_binary:
result.discard("source")
elif ":all:" in self.no_binary:
result.discard("binary")
return frozenset(result)
def disallow_binaries(self) -> None:
self.handle_mutual_excludes(
":all:",
self.no_binary,
self.only_binary,
)
| FormatControl |
python | scipy__scipy | scipy/stats/tests/test_kdeoth.py | {
"start": 10058,
"end": 10221
} | class ____(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super().__init__(dataset)
| _kde_subclass2 |
python | walkccc__LeetCode | solutions/515. Find Largest Value in Each Tree Row/515-2.py | {
"start": 0,
"end": 401
} | class ____:
def largestValues(self, root: TreeNode | None) -> list[int]:
ans = []
def dfs(root: TreeNode | None, depth: int) -> None:
if not root:
return
if depth + 1 > len(ans):
ans.append(root.val)
else:
ans[depth] = max(ans[depth], root.val)
dfs(root.left, depth + 1)
dfs(root.right, depth + 1)
dfs(root, 0)
return ans
| Solution |
python | django__django | tests/composite_pk/models/tenant.py | {
"start": 141,
"end": 418
} | class ____(models.Model):
pk = models.CompositePrimaryKey("tenant_id", "id")
tenant = models.ForeignKey(Tenant, on_delete=models.CASCADE, related_name="tokens")
id = models.SmallIntegerField()
secret = models.CharField(max_length=10, default="", blank=True)
| Token |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_sum_values_to_be_between.py | {
"start": 875,
"end": 2663
} | class ____(MulticolumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "multicolumn_values.sum_values_to_be_between_max_and_min"
# These point your metric at the provided keys to facilitate calculation
condition_domain_keys = (
"batch_id",
"table",
"column_list",
"row_condition",
"condition_parser",
"ignore_row_if",
)
condition_value_keys = (
"min_value",
"max_value",
)
# This method implements the core logic for the PandasExecutionEngine
@multicolumn_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_list, min_value, max_value, **kwargs):
sum_of_columns = column_list.sum(axis=1)
return (sum_of_columns >= min_value) & (sum_of_columns <= max_value)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column_list, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
@multicolumn_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column_list, min_value, max_value, **kwargs):
columns_to_sum = column_list.columns
return (
functools.reduce(operator.add, [F.col(column) for column in columns_to_sum])
>= F.lit(min_value)
) & (
functools.reduce(operator.add, [F.col(column) for column in columns_to_sum])
<= F.lit(max_value)
)
# This class defines the Expectation itself
| MulticolumnValuesSumValuesToBeBetweenMaxAndMin |
python | ansible__ansible | lib/ansible/module_utils/facts/packages.py | {
"start": 2743,
"end": 3957
} | class ____(LibMgr):
CLI_BINARIES = [] # type: t.List[str]
INTERPRETERS = ['/usr/bin/python3']
def is_available(self, handle_exceptions=True):
if super(RespawningLibMgr, self).is_available():
return True
for binary in self.CLI_BINARIES:
try:
bin_path = get_bin_path(binary)
except ValueError:
# Not an interesting exception to raise, just a speculative probe
continue
else:
# It looks like this package manager is installed
if not has_respawned():
# See if respawning will help
interpreter_path = probe_interpreters_for_module(self.INTERPRETERS, self.LIB)
if interpreter_path:
respawn_module(interpreter_path)
# The module will exit when the respawned copy completes
if not handle_exceptions:
raise Exception(f'Found executable at {bin_path}. {missing_required_lib(self.LIB)}')
if not handle_exceptions:
raise Exception(missing_required_lib(self.LIB))
return False
| RespawningLibMgr |
python | pallets__werkzeug | src/werkzeug/debug/tbtools.py | {
"start": 6086,
"end": 10540
} | class ____:
__slots__ = ("_te", "_cache_all_tracebacks", "_cache_all_frames")
def __init__(
self,
exc: BaseException,
te: traceback.TracebackException | None = None,
*,
skip: int = 0,
hide: bool = True,
) -> None:
self._te = _process_traceback(exc, te, skip=skip, hide=hide)
def __str__(self) -> str:
return f"<{type(self).__name__} {self._te}>"
@cached_property
def all_tracebacks(
self,
) -> list[tuple[str | None, traceback.TracebackException]]:
out: list[tuple[str | None, traceback.TracebackException]] = []
current: traceback.TracebackException | None = self._te
while current is not None:
if current.__cause__ is not None:
chained_msg = (
"The above exception was the direct cause of the"
" following exception"
)
chained_exc = current.__cause__
elif current.__context__ is not None and not current.__suppress_context__:
chained_msg = (
"During handling of the above exception, another exception occurred"
)
chained_exc = current.__context__
else:
chained_msg = None
chained_exc = None
out.append((chained_msg, current))
current = chained_exc
return out
@cached_property
def all_frames(self) -> list[DebugFrameSummary]:
return [
f # type: ignore[misc]
for _, te in self.all_tracebacks
for f in te.stack
]
def render_traceback_text(self) -> str:
return "".join(self._te.format())
def render_traceback_html(self, include_title: bool = True) -> str:
library_frames = [f.is_library for f in self.all_frames]
mark_library = 0 < sum(library_frames) < len(library_frames)
rows = []
if not library_frames:
classes = "traceback noframe-traceback"
else:
classes = "traceback"
for msg, current in reversed(self.all_tracebacks):
row_parts = []
if msg is not None:
row_parts.append(f'<li><div class="exc-divider">{msg}:</div>')
for frame in current.stack:
frame = t.cast(DebugFrameSummary, frame)
info = f' title="{escape(frame.info)}"' if frame.info else ""
row_parts.append(f"<li{info}>{frame.render_html(mark_library)}")
rows.append("\n".join(row_parts))
if sys.version_info < (3, 13):
exc_type_str = self._te.exc_type.__name__
else:
exc_type_str = self._te.exc_type_str
is_syntax_error = exc_type_str == "SyntaxError"
if include_title:
if is_syntax_error:
title = "Syntax Error"
else:
title = "Traceback <em>(most recent call last)</em>:"
else:
title = ""
exc_full = escape("".join(self._te.format_exception_only()))
if is_syntax_error:
description = f"<pre class=syntaxerror>{exc_full}</pre>"
else:
description = f"<blockquote>{exc_full}</blockquote>"
return SUMMARY_HTML % {
"classes": classes,
"title": f"<h3>{title}</h3>",
"frames": "\n".join(rows),
"description": description,
}
def render_debugger_html(
self, evalex: bool, secret: str, evalex_trusted: bool
) -> str:
exc_lines = list(self._te.format_exception_only())
plaintext = "".join(self._te.format())
if sys.version_info < (3, 13):
exc_type_str = self._te.exc_type.__name__
else:
exc_type_str = self._te.exc_type_str
return PAGE_HTML % {
"evalex": "true" if evalex else "false",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "false",
"title": escape(exc_lines[0]),
"exception": escape("".join(exc_lines)),
"exception_type": escape(exc_type_str),
"summary": self.render_traceback_html(include_title=False),
"plaintext": escape(plaintext),
"plaintext_cs": re.sub("-{2,}", "-", plaintext),
"secret": secret,
}
| DebugTraceback |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_drypython_returns.py | {
"start": 2245,
"end": 2289
} | class ____(Generic[A, B]):
pass
| _FirstBase |
python | python-markdown__markdown | markdown/preprocessors.py | {
"start": 1585,
"end": 2266
} | class ____(util.Processor):
"""
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a `run` method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend `Preprocessor`.
"""
def run(self, lines: list[str]) -> list[str]:
"""
Each subclass of `Preprocessor` should override the `run` method, which
takes the document as a list of strings split by newlines and returns
the (possibly modified) list of lines.
"""
pass # pragma: no cover
| Preprocessor |
python | kamyu104__LeetCode-Solutions | Python/n-th-tribonacci-number.py | {
"start": 51,
"end": 870
} | class ____(object):
def tribonacci(self, n):
"""
:type n: int
:rtype: int
"""
def matrix_expo(A, K):
result = [[int(i==j) for j in xrange(len(A))] \
for i in xrange(len(A))]
while K:
if K % 2:
result = matrix_mult(result, A)
A = matrix_mult(A, A)
K /= 2
return result
def matrix_mult(A, B):
ZB = zip(*B)
return [[sum(a*b for a, b in itertools.izip(row, col)) \
for col in ZB] for row in A]
T = [[1, 1, 0],
[1, 0, 1],
[1, 0, 0]]
return matrix_mult([[1, 0, 0]], matrix_expo(T, n))[0][1] # [a1, a0, a(-1)] * T^n
# Time: O(n)
# Space: O(1)
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/metadata/pipeline.py | {
"start": 8062,
"end": 10546
} | class ____(PoetryRunStep):
def __init__(self, context: PipelineContext) -> None:
super().__init__(
context=context,
title="Test Metadata Orchestrator",
parent_dir_path="airbyte-ci/connectors/metadata_service",
module_path="orchestrator",
poetry_run_args=["pytest"],
)
# PIPELINES
async def run_metadata_orchestrator_deploy_pipeline(
ctx: click.Context,
is_local: bool,
git_branch: str,
git_revision: str,
diffed_branch: str,
git_repo_url: str,
report_output_prefix: str,
gha_workflow_run_url: Optional[str],
dagger_logs_url: Optional[str],
pipeline_start_timestamp: Optional[int],
ci_context: Optional[str],
) -> bool:
success: bool = False
metadata_pipeline_context = PipelineContext(
pipeline_name="Metadata Service Orchestrator Deploy Pipeline",
is_local=is_local,
git_branch=git_branch,
git_revision=git_revision,
diffed_branch=diffed_branch,
git_repo_url=git_repo_url,
report_output_prefix=report_output_prefix,
gha_workflow_run_url=gha_workflow_run_url,
dagger_logs_url=dagger_logs_url,
pipeline_start_timestamp=pipeline_start_timestamp,
ci_context=ci_context,
)
async with dagger.Connection(DAGGER_CONFIG) as dagger_client:
metadata_pipeline_context.dagger_client = dagger_client
async with metadata_pipeline_context:
steps: STEP_TREE = [
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.TEST_ORCHESTRATOR,
step=TestOrchestrator(context=metadata_pipeline_context),
)
],
[
StepToRun(
id=CONNECTOR_TEST_STEP_ID.DEPLOY_ORCHESTRATOR,
step=DeployOrchestrator(context=metadata_pipeline_context),
depends_on=[CONNECTOR_TEST_STEP_ID.TEST_ORCHESTRATOR],
)
],
]
steps_results = await run_steps(steps)
report = Report(
pipeline_context=metadata_pipeline_context,
steps_results=list(steps_results.values()),
name="METADATA ORCHESTRATOR DEPLOY RESULTS",
)
metadata_pipeline_context.report = report
success = report.success
return success
| TestOrchestrator |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeature.py | {
"start": 112,
"end": 1113
} | class ____(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
pass
@abstractmethod
def _calculate(cls, X, y, logger, feat_type):
pass
def __call__(self, X, y, logger, feat_type=None):
if feat_type is None:
feat_type = {i: "numerical" for i in range(X.shape[1])}
starttime = time.time()
try:
if scipy.sparse.issparse(X) and hasattr(self, "_calculate_sparse"):
value = self._calculate_sparse(X, y, logger, feat_type)
else:
value = self._calculate(X, y, logger, feat_type)
comment = ""
except MemoryError:
value = None
comment = "Memory Error"
endtime = time.time()
return MetaFeatureValue(
self.__class__.__name__,
self.type_,
0,
0,
value,
endtime - starttime,
comment=comment,
)
| AbstractMetaFeature |
python | huggingface__transformers | src/transformers/models/luke/tokenization_luke.py | {
"start": 6126,
"end": 83836
} | class ____(TokenizersBackend):
"""
Constructs a LUKE tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import LukeTokenizer
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
>>> tokenizer("Hello world")["input_ids"]
[0, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods. It also creates entity sequences, namely
`entity_ids`, `entity_attention_mask`, `entity_token_type_ids`, and `entity_position_ids` to be used by the LUKE
model.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
entity_vocab_file (`str`):
Path to the entity vocabulary file.
task (`str`, *optional*):
Task for which you want to prepare sequences. One of `"entity_classification"`,
`"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity
sequence is automatically created based on the given entity span(s).
max_entity_length (`int`, *optional*, defaults to 32):
The maximum length of `entity_ids`.
max_mention_length (`int`, *optional*, defaults to 30):
The maximum number of tokens inside an entity span.
entity_token_1 (`str`, *optional*, defaults to `<ent>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
`task` is set to `"entity_classification"` or `"entity_pair_classification"`.
entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
`task` is set to `"entity_pair_classification"`.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (LUKE tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
task=None,
max_entity_length=32,
max_mention_length=30,
entity_token_1="<ent>",
entity_token_2="<ent2>",
entity_unk_token="[UNK]",
entity_pad_token="[PAD]",
entity_mask_token="[MASK]",
entity_mask2_token="[MASK2]",
vocab: Optional[dict] = None,
merges: Optional[list] = None,
entity_vocab: Optional[dict] = None,
**kwargs,
):
self.add_prefix_space = add_prefix_space
# Handle entity vocab file for backward compatibility
entity_vocab_file = kwargs.pop("entity_vocab_file", None)
# Check if vocab/merges/entity_vocab are in kwargs
if vocab is None and "vocab" in kwargs:
vocab = kwargs.pop("vocab")
if merges is None and "merges" in kwargs:
merges = kwargs.pop("merges")
if entity_vocab is None and "entity_vocab" in kwargs:
entity_vocab = kwargs.pop("entity_vocab")
# Build vocab and merges (either from data or empty, like GPT2Tokenizer)
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {}
if merges is not None:
self._merges = [tuple(merge) if isinstance(merge, list) else merge for merge in merges]
else:
self._merges = []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
self._tokenizer.decoder = decoders.ByteLevel()
# Load entity vocab
if entity_vocab is not None:
self.entity_vocab = entity_vocab
elif entity_vocab_file is not None:
with open(entity_vocab_file, encoding="utf-8") as f:
self.entity_vocab = json.load(f)
else:
# If no entity vocab provided, create a minimal one with required special tokens
self.entity_vocab = {
entity_unk_token: 0,
entity_pad_token: 1,
entity_mask_token: 2,
entity_mask2_token: 3,
}
# Validate entity special tokens
for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]:
if entity_special_token not in self.entity_vocab:
raise ValueError(
f"Specified entity special token `{entity_special_token}` is not found in entity_vocab."
)
self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]
# Setup task and max_entity_length
self.task = task
if task is None or task == "entity_span_classification":
self.max_entity_length = max_entity_length
elif task == "entity_classification":
self.max_entity_length = 1
elif task == "entity_pair_classification":
self.max_entity_length = 2
else:
raise ValueError(
f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification',"
" 'entity_span_classification'] only."
)
self.max_mention_length = max_mention_length
# Add entity tokens to extra_special_tokens
entity_token_1 = (
AddedToken(entity_token_1, lstrip=False, rstrip=False)
if isinstance(entity_token_1, str)
else entity_token_1
)
entity_token_2 = (
AddedToken(entity_token_2, lstrip=False, rstrip=False)
if isinstance(entity_token_2, str)
else entity_token_2
)
# Handle extra/legacy special tokens (v4 hub files compat)
extra_tokens: list[AddedToken | str] = []
for key in ("extra_special_tokens", "additional_special_tokens"):
for token in kwargs.pop(key, []) or []:
extra_tokens.append(AddedToken(**token) if isinstance(token, dict) else token)
# Ensure LUKE entity tokens are present exactly once.
seen = {str(token) for token in extra_tokens}
for token in (entity_token_1, entity_token_2):
token_str = str(token)
if token_str not in seen:
extra_tokens.append(token)
seen.add(token_str)
kwargs["extra_special_tokens"] = extra_tokens
tokenizer_object = self._tokenizer
# Configure default special token behaviors to match LUKE formatting
token_type_ids_pattern = kwargs.setdefault("token_type_ids_pattern", "all_zeros")
special_tokens_pattern = kwargs.setdefault("special_tokens_pattern", "cls_double_sep")
token_type_ids_include_special_tokens = kwargs.setdefault("token_type_ids_include_special_tokens", True)
self.token_type_ids_pattern = token_type_ids_pattern
self.special_tokens_pattern = special_tokens_pattern
self.token_type_ids_include_special_tokens = token_type_ids_include_special_tokens
# Set clean_up_tokenization_spaces=True by default to match old Python tokenizer behavior
kwargs.setdefault("clean_up_tokenization_spaces", True)
super().__init__(
tokenizer_object=tokenizer_object,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
task=task,
max_entity_length=max_entity_length,
max_mention_length=max_mention_length,
entity_token_1=str(entity_token_1),
entity_token_2=str(entity_token_2),
entity_unk_token=entity_unk_token,
entity_pad_token=entity_pad_token,
entity_mask_token=entity_mask_token,
entity_mask2_token=entity_mask2_token,
entity_vocab=entity_vocab if entity_vocab_file is None else None, # Only store if it was passed as data
**kwargs,
)
self._post_init()
def _post_init(self):
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{self.cls_token}:0 $A:0 {self.sep_token}:0",
pair=f"{self.cls_token}:0 $A:0 {self.sep_token}:0 {self.sep_token}:0 $B:1 {self.sep_token}:1",
special_tokens=[
(self.cls_token, self.cls_token_id),
(self.sep_token, self.sep_token_id),
],
)
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
return PreTrainedTokenizer.build_inputs_with_special_tokens(self, token_ids_0, token_ids_1)
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
return PreTrainedTokenizer.get_special_tokens_mask(
self, token_ids_0, token_ids_1, already_has_special_tokens=already_has_special_tokens
)
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
return PreTrainedTokenizer.create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1)
def _decode(
self,
token_ids: Union[int, list[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> str:
text = super()._decode(
token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=False, **kwargs
)
clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
text = (
text.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return text
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, list[TextInput]],
text_pair: Optional[Union[TextInput, list[TextInput]]] = None,
entity_spans: Optional[Union[EntitySpanInput, list[EntitySpanInput]]] = None,
entity_spans_pair: Optional[Union[EntitySpanInput, list[EntitySpanInput]]] = None,
entities: Optional[Union[EntityInput, list[EntityInput]]] = None,
entities_pair: Optional[Union[EntityInput, list[EntityInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# Check for seq2seq parameters that are not supported with entity-aware encoding
if kwargs.get("text_target") is not None or kwargs.get("text_pair_target") is not None:
if entity_spans is not None or entities is not None or self.task is not None:
raise NotImplementedError(
"text_target and text_pair_target are not supported when using entity-aware encoding. "
"Please use the tokenizer without entities for seq2seq tasks."
)
# Delegate to parent for seq2seq encoding
return super().__call__(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences, depending on the task you want to prepare them for.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
text_pair (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
entity_spans (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify
`"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor,
the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each
sequence must be equal to the length of each sequence of `entities`.
entity_spans_pair (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify the
`task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
length of each sequence must be equal to the length of each sequence of `entities_pair`.
entities (`list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
`entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
is automatically constructed by filling it with the [MASK] entity.
entities_pair (`list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
`entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
sequences is automatically constructed by filling it with the [MASK] entity.
max_entity_length (`int`, *optional*):
The maximum length of `entity_ids`.
"""
# Input type checking for clearer error
is_valid_single_text = isinstance(text, str)
is_valid_batch_text = isinstance(text, (list, tuple)) and (
len(text) == 0 or isinstance(text[0], (str, list, tuple))
)
if not (is_valid_single_text or is_valid_batch_text):
raise ValueError(
"text input must be of type `str` (single example), `list[str]` (batch), or `list[tuple]` (batch pairs)."
)
is_valid_single_text_pair = isinstance(text_pair, str)
is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (
len(text_pair) == 0 or isinstance(text_pair[0], str)
)
if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):
raise ValueError("text_pair input must be of type `str` (single example) or `list[str]` (batch).")
is_batched = bool(isinstance(text, (list, tuple)))
# Convert padding and truncation to strategies
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
if is_batched:
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
if entities is None:
batch_entities_or_entities_pairs = None
else:
batch_entities_or_entities_pairs = (
list(zip(entities, entities_pair)) if entities_pair is not None else entities
)
if entity_spans is None:
batch_entity_spans_or_entity_spans_pairs = None
else:
batch_entity_spans_or_entity_spans_pairs = (
list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self._encode_plus(
text=text,
text_pair=text_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
entities=entities,
entities_pair=entities_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
    def _encode_plus(
        self,
        text: Union[TextInput],
        text_pair: Optional[Union[TextInput]] = None,
        entity_spans: Optional[EntitySpanInput] = None,
        entity_spans_pair: Optional[EntitySpanInput] = None,
        entities: Optional[EntityInput] = None,
        entities_pair: Optional[EntityInput] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: Optional[bool] = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Encode a single example (optionally a text pair) together with its entity annotations.

        When no entity information is given and no `task` is configured, encoding is delegated
        to the parent class; otherwise token ids and token-level entity spans are built with
        `_create_input_sequence` and finalized by `prepare_for_model`.
        """
        # If no entities are provided and task doesn't require them, delegate to parent for proper Encoding support
        if (
            entity_spans is None
            and entity_spans_pair is None
            and entities is None
            and entities_pair is None
            and self.task is None
        ):
            # Delegate to parent TokenizersBackend which properly handles Encoding objects
            return super()._encode_plus(
                text=text,
                text_pair=text_pair,
                add_special_tokens=add_special_tokens,
                padding_strategy=padding_strategy,
                truncation_strategy=truncation_strategy,
                max_length=max_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

        # The entity-aware path builds ids in Python, so no char-offset mapping is produced.
        if return_offsets_mapping:
            raise NotImplementedError("return_offset_mapping is not available when using entity-aware encoding.")

        if is_split_into_words:
            raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")

        (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        ) = self._create_input_sequence(
            text=text,
            text_pair=text_pair,
            entities=entities,
            entities_pair=entities_pair,
            entity_spans=entity_spans,
            entity_spans_pair=entity_spans_pair,
            **kwargs,
        )

        # prepare_for_model will create the attention_mask and token_type_ids
        return self.prepare_for_model(
            first_ids,
            pair_ids=second_ids,
            entity_ids=first_entity_ids,
            pair_entity_ids=second_entity_ids,
            entity_token_spans=first_entity_token_spans,
            pair_entity_token_spans=second_entity_token_spans,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            max_entity_length=max_entity_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair]],
batch_entity_spans_or_entity_spans_pairs: Optional[
Union[list[EntitySpanInput], list[tuple[EntitySpanInput, EntitySpanInput]]]
] = None,
batch_entities_or_entities_pairs: Optional[
Union[list[EntityInput], list[tuple[EntityInput, EntityInput]]]
] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# If no entities are provided and task doesn't require them, delegate to parent for proper Encoding support
if (
batch_entity_spans_or_entity_spans_pairs is None
and batch_entities_or_entities_pairs is None
and self.task is None
):
# Parent's _encode_plus handles batching internally, so we reconstruct text/text_pair
# from batch_text_or_text_pairs and pass to parent's _encode_plus
# Detect if we have pairs
if batch_text_or_text_pairs and isinstance(batch_text_or_text_pairs[0], (tuple, list)):
# We have pairs
texts, text_pairs = zip(*batch_text_or_text_pairs)
texts = list(texts)
text_pairs = list(text_pairs)
else:
# Just texts
texts = batch_text_or_text_pairs
text_pairs = None
# Delegate to parent TokenizersBackend which properly handles Encoding objects for batches
return super()._encode_plus(
text=texts,
text_pair=text_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
split_special_tokens=kwargs.get("split_special_tokens", self.split_special_tokens),
**kwargs,
)
if return_offsets_mapping:
raise NotImplementedError("return_offset_mapping is not available when using entity-aware encoding.")
if is_split_into_words:
raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
# input_ids is a list of tuples (one for each example in the batch)
input_ids = []
entity_ids = []
entity_token_spans = []
for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
if not isinstance(text_or_text_pair, (list, tuple)):
text, text_pair = text_or_text_pair, None
else:
text, text_pair = text_or_text_pair
entities, entities_pair = None, None
if batch_entities_or_entities_pairs is not None:
entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
if entities_or_entities_pairs:
if isinstance(entities_or_entities_pairs[0], str):
entities, entities_pair = entities_or_entities_pairs, None
else:
entities, entities_pair = entities_or_entities_pairs
entity_spans, entity_spans_pair = None, None
if batch_entity_spans_or_entity_spans_pairs is not None:
entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance(
entity_spans_or_entity_spans_pairs[0], list
):
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
else:
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None
(
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
) = self._create_input_sequence(
text=text,
text_pair=text_pair,
entities=entities,
entities_pair=entities_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
**kwargs,
)
input_ids.append((first_ids, second_ids))
entity_ids.append((first_entity_ids, second_entity_ids))
entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
batch_outputs = self._batch_prepare_for_model(
input_ids,
batch_entity_ids_pairs=entity_ids,
batch_entity_token_spans_pairs=entity_token_spans,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
if not isinstance(entity_spans, list):
raise TypeError("entity_spans should be given as a list")
elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
raise ValueError(
"entity_spans should be given as a list of tuples containing the start and end character indices"
)
if entities is not None:
if not isinstance(entities, list):
raise ValueError("If you specify entities, they should be given as a list")
if len(entities) > 0 and not isinstance(entities[0], str):
raise ValueError("If you specify entities, they should be given as a list of entity names")
if len(entities) != len(entity_spans):
raise ValueError("If you specify entities, entities and entity_spans must be the same length")
    def _create_input_sequence(
        self,
        text: Union[TextInput],
        text_pair: Optional[Union[TextInput]] = None,
        entities: Optional[EntityInput] = None,
        entities_pair: Optional[EntityInput] = None,
        entity_spans: Optional[EntitySpanInput] = None,
        entity_spans_pair: Optional[EntitySpanInput] = None,
        **kwargs,
    ) -> tuple[list, list, list, list, list, list]:
        """
        Convert raw text plus character-level entity spans into token ids and token-level spans.

        Returns a 6-tuple `(first_ids, second_ids, first_entity_ids, second_entity_ids,
        first_entity_token_spans, second_entity_token_spans)`; entries that do not apply
        (e.g. no text pair) are `None`. Behavior branches on `self.task`.
        """

        def get_input_ids(text):
            # Use the underlying tokenizer directly to avoid recursion
            encoding = self._tokenizer.encode(text, add_special_tokens=False)
            return encoding.ids

        def get_input_ids_and_entity_token_spans(text, entity_spans):
            # Tokenize `text` piecewise, cutting at every span boundary, so each character
            # boundary maps to an exact token index (char_pos2token_pos).
            if entity_spans is None:
                return get_input_ids(text), None

            cur = 0
            input_ids = []
            entity_token_spans = [None] * len(entity_spans)

            split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
            char_pos2token_pos = {}

            for split_char_position in split_char_positions:
                orig_split_char_position = split_char_position
                if (
                    split_char_position > 0 and text[split_char_position - 1] == " "
                ):  # whitespace should be prepended to the following token
                    split_char_position -= 1
                if cur != split_char_position:
                    input_ids += get_input_ids(text[cur:split_char_position])
                    cur = split_char_position
                char_pos2token_pos[orig_split_char_position] = len(input_ids)

            input_ids += get_input_ids(text[cur:])

            entity_token_spans = [
                (char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans
            ]

            return input_ids, entity_token_spans

        first_ids, second_ids = None, None
        first_entity_ids, second_entity_ids = None, None
        first_entity_token_spans, second_entity_token_spans = None, None

        if self.task is None:
            if entity_spans is None:
                first_ids = get_input_ids(text)
            else:
                self._check_entity_input_format(entities, entity_spans)

                first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
                if entities is None:
                    # No entity names supplied: fill every span with the [MASK] entity.
                    first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
                else:
                    # Unknown entity names fall back to the entity [UNK] id.
                    first_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities]

            if text_pair is not None:
                if entity_spans_pair is None:
                    second_ids = get_input_ids(text_pair)
                else:
                    self._check_entity_input_format(entities_pair, entity_spans_pair)

                    second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
                        text_pair, entity_spans_pair
                    )
                    if entities_pair is None:
                        second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair)
                    else:
                        second_entity_ids = [
                            self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair
                        ]

        elif self.task == "entity_classification":
            if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)):
                raise ValueError(
                    "Entity spans should be a list containing a single tuple "
                    "containing the start and end character indices of an entity"
                )
            first_entity_ids = [self.entity_mask_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)

            # add special tokens to input ids
            entity_token_start, entity_token_end = first_entity_token_spans[0]
            # Insert the closing marker first so the start index remains valid for the
            # second insertion.
            first_ids = (
                first_ids[:entity_token_end] + [self.extra_special_tokens_ids[0]] + first_ids[entity_token_end:]
            )
            first_ids = (
                first_ids[:entity_token_start] + [self.extra_special_tokens_ids[0]] + first_ids[entity_token_start:]
            )
            # The span now includes the two inserted marker tokens.
            first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]

        elif self.task == "entity_pair_classification":
            if not (
                isinstance(entity_spans, list)
                and len(entity_spans) == 2
                and isinstance(entity_spans[0], tuple)
                and isinstance(entity_spans[1], tuple)
            ):
                raise ValueError(
                    "Entity spans should be provided as a list of two tuples, "
                    "each tuple containing the start and end character indices of an entity"
                )

            head_span, tail_span = entity_spans
            first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)

            head_token_span, tail_token_span = first_entity_token_spans
            token_span_with_special_token_ids = [
                (head_token_span, self.extra_special_tokens_ids[0]),
                (tail_token_span, self.extra_special_tokens_ids[1]),
            ]
            # Adjust both spans for the four marker tokens that will be inserted, and order
            # the insertions so that the later span is modified first (indices stay valid).
            if head_token_span[0] < tail_token_span[0]:
                first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
                first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
                token_span_with_special_token_ids.reverse()
            else:
                first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
                first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)

            for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
                first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
                first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]

        elif self.task == "entity_span_classification":
            if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)):
                raise ValueError(
                    "Entity spans should be provided as a list of tuples, "
                    "each tuple containing the start and end character indices of an entity"
                )

            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
            first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)

        else:
            raise ValueError(f"Task {self.task} not supported")

        return (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        )
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_prepare_for_model(
        self,
        batch_ids_pairs: list[tuple[list[int], None]],
        batch_entity_ids_pairs: list[tuple[Optional[list[int]], Optional[list[int]]]],
        batch_entity_token_spans_pairs: list[tuple[Optional[list[tuple[int, int]]], Optional[list[tuple[int, int]]]]],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
            batch_entity_ids_pairs: list of entity ids or entity ids pairs
            batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
            max_entity_length: The maximum length of the entity sequence.
        """

        # Prepare each example individually WITHOUT padding, then pad the whole batch once
        # at the end so `longest` padding can see every example's length.
        batch_outputs = {}
        for input_ids, entity_ids, entity_token_span_pairs in zip(
            batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
        ):
            first_ids, second_ids = input_ids
            first_entity_ids, second_entity_ids = entity_ids
            first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
            outputs = self.prepare_for_model(
                first_ids,
                second_ids,
                entity_ids=first_entity_ids,
                pair_entity_ids=second_entity_ids,
                entity_token_spans=first_entity_token_spans,
                pair_entity_token_spans=second_entity_token_spans,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                max_entity_length=max_entity_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                padding_side=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            # Accumulate per-example outputs into lists keyed by feature name.
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        # Batch-level padding (also builds the attention mask when requested).
        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        ids: list[int],
        pair_ids: Optional[list[int]] = None,
        entity_ids: Optional[list[int]] = None,
        pair_entity_ids: Optional[list[int]] = None,
        entity_token_spans: Optional[list[tuple[int, int]]] = None,
        pair_entity_token_spans: Optional[list[tuple[int, int]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input id, entity id and entity span, or a pair of sequences of inputs ids, entity ids,
        entity spans so that it can be used by the model. It adds special tokens, truncates sequences if overflowing
        while taking into account the special tokens and manages a moving window (with user defined stride) for
        overflowing tokens. Please Note, for *pair_ids* different than `None` and *truncation_strategy = longest_first*
        or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an
        error.

        Args:
            ids (`list[int]`):
                Tokenized input ids of the first sequence.
            pair_ids (`list[int]`, *optional*):
                Tokenized input ids of the second sequence.
            entity_ids (`list[int]`, *optional*):
                Entity ids of the first sequence.
            pair_entity_ids (`list[int]`, *optional*):
                Entity ids of the second sequence.
            entity_token_spans (`list[tuple[int, int]]`, *optional*):
                Entity spans of the first sequence.
            pair_entity_token_spans (`list[tuple[int, int]]`, *optional*):
                Entity spans of the second sequence.
            max_entity_length (`int`, *optional*):
                The maximum length of the entity sequence.
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        # Compute lengths
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0

        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )
        if (
            return_overflowing_tokens
            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
            and pair_ids is not None
        ):
            raise ValueError(
                "Not possible to return overflowing tokens for pair of sequences with the "
                "`longest_first`. Please select another truncation strategy than `longest_first`, "
                "for instance `only_second` or `only_first`."
            )

        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        encoded_inputs = {}

        # Compute the total size of the returned word encodings
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

        # Truncation: Handle max sequence length and max_entity_length
        overflowing_tokens = []
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
            # truncate words up to max_length
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
                ids,
                pair_ids=pair_ids,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )

        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["num_truncated_tokens"] = total_len - max_length

        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            entity_token_offset = 1  # 1 * <s> token
            pair_entity_token_offset = len(ids) + 3  # 1 * <s> token & 2 * <sep> tokens
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
            entity_token_offset = 0
            pair_entity_token_offset = len(ids)

        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)

        # Set max entity length
        if not max_entity_length:
            max_entity_length = self.max_entity_length

        if entity_ids is not None:
            total_entity_len = 0
            num_invalid_entities = 0
            # Drop entities whose token spans extend past the (possibly truncated) sequence.
            valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)]
            valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]

            total_entity_len += len(valid_entity_ids)
            num_invalid_entities += len(entity_ids) - len(valid_entity_ids)

            valid_pair_entity_ids, valid_pair_entity_token_spans = None, None
            if pair_entity_ids is not None:
                valid_pair_entity_ids = [
                    ent_id
                    for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
                    if span[1] <= len(pair_ids)
                ]
                valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)]
                total_entity_len += len(valid_pair_entity_ids)
                num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)

            if num_invalid_entities != 0:
                logger.warning(
                    f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the"
                    " truncation of input tokens"
                )

            if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length:
                # truncate entities up to max_entity_length
                valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences(
                    valid_entity_ids,
                    pair_ids=valid_pair_entity_ids,
                    num_tokens_to_remove=total_entity_len - max_entity_length,
                    truncation_strategy=truncation_strategy,
                    stride=stride,
                )
                # Keep the span lists aligned with the truncated entity id lists.
                valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
                if valid_pair_entity_token_spans is not None:
                    valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)]

            if return_overflowing_tokens:
                encoded_inputs["overflowing_entities"] = overflowing_entities
                encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length

            final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids
            encoded_inputs["entity_ids"] = list(final_entity_ids)
            entity_position_ids = []
            entity_start_positions = []
            entity_end_positions = []
            # Convert token spans (shifted by the special-token offsets computed above) into
            # fixed-length position-id lists padded with -1 up to max_mention_length.
            for token_spans, offset in (
                (valid_entity_token_spans, entity_token_offset),
                (valid_pair_entity_token_spans, pair_entity_token_offset),
            ):
                if token_spans is not None:
                    for start, end in token_spans:
                        start += offset
                        end += offset
                        position_ids = list(range(start, end))[: self.max_mention_length]
                        position_ids += [-1] * (self.max_mention_length - end + start)
                        entity_position_ids.append(position_ids)
                        entity_start_positions.append(start)
                        entity_end_positions.append(end - 1)

            encoded_inputs["entity_position_ids"] = entity_position_ids
            if self.task == "entity_span_classification":
                encoded_inputs["entity_start_positions"] = entity_start_positions
                encoded_inputs["entity_end_positions"] = entity_end_positions

            if return_token_type_ids:
                encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"])

        # Check lengths
        self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)

        # Padding
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                max_entity_length=max_entity_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

        if return_length:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])

        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )

        return batch_outputs
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
list[BatchEncoding],
dict[str, EncodedInput],
dict[str, list[EncodedInput]],
list[dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
in the batch. Padding side (left/right) padding token ids are defined at the tokenizer level (with
`self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`) .. note:: If the `encoded_inputs` passed
are dictionary of numpy arrays or PyTorch tensors the result will use the same type unless
you provide a different tensor type with `return_tensors`. In the case of PyTorch tensors, you will lose the
specific device of your tensors however.
Args:
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str, list[list[int]]` or `list[dict[str, list[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of
tokenized inputs (list of [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str,
list[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function. Instead of `list[int]` you can have tensors (numpy arrays, or PyTorch tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
max_entity_length (`int`, *optional*):
The maximum length of the entity sequence.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention
masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
# Call .keys() explicitly for compatibility with TensorDict and other Mapping subclasses
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
# The model's main input name, usually `input_ids`, has be passed for padding
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
# At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
if not isinstance(first_element, (int, list, tuple)):
if is_torch_tensor(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
"Should be one of a python, numpy, or pytorch object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
if max_entity_length is None:
max_entity_length = self.max_entity_length
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
if any(len(v) != batch_size for v in encoded_inputs.values()):
raise ValueError("Some items in the output dictionary have a different batch size than others.")
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
max_entity_length = (
max(len(inputs) for inputs in encoded_inputs["entity_ids"]) if "entity_ids" in encoded_inputs else 0
)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = {k: v[i] for k, v in encoded_inputs.items()}
outputs = self._pad(
inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def _pad(
self,
encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
max_entity_length: The maximum length of the entity sequence.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
entities_provided = bool("entity_ids" in encoded_inputs)
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs["input_ids"])
if entities_provided:
max_entity_length = len(encoded_inputs["entity_ids"])
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
if (
entities_provided
and max_entity_length is not None
and pad_to_multiple_of is not None
and (max_entity_length % pad_to_multiple_of != 0)
):
max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
len(encoded_inputs["input_ids"]) != max_length
or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length)
)
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
if entities_provided and return_attention_mask and "entity_attention_mask" not in encoded_inputs:
encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"])
if needs_to_be_padded:
difference = max_length - len(encoded_inputs["input_ids"])
padding_side = padding_side if padding_side is not None else self.padding_side
if entities_provided:
entity_difference = max_entity_length - len(encoded_inputs["entity_ids"])
if padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if entities_provided:
encoded_inputs["entity_attention_mask"] = (
encoded_inputs["entity_attention_mask"] + [0] * entity_difference
)
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [0] * difference
if entities_provided:
encoded_inputs["entity_token_type_ids"] = (
encoded_inputs["entity_token_type_ids"] + [0] * entity_difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
if entities_provided:
encoded_inputs["entity_ids"] = (
encoded_inputs["entity_ids"] + [self.entity_pad_token_id] * entity_difference
)
encoded_inputs["entity_position_ids"] = (
encoded_inputs["entity_position_ids"] + [[-1] * self.max_mention_length] * entity_difference
)
if self.task == "entity_span_classification":
encoded_inputs["entity_start_positions"] = (
encoded_inputs["entity_start_positions"] + [0] * entity_difference
)
encoded_inputs["entity_end_positions"] = (
encoded_inputs["entity_end_positions"] + [0] * entity_difference
)
elif padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if entities_provided:
encoded_inputs["entity_attention_mask"] = [0] * entity_difference + encoded_inputs[
"entity_attention_mask"
]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs["token_type_ids"]
if entities_provided:
encoded_inputs["entity_token_type_ids"] = [0] * entity_difference + encoded_inputs[
"entity_token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
if entities_provided:
encoded_inputs["entity_ids"] = [self.entity_pad_token_id] * entity_difference + encoded_inputs[
"entity_ids"
]
encoded_inputs["entity_position_ids"] = [
[-1] * self.max_mention_length
] * entity_difference + encoded_inputs["entity_position_ids"]
if self.task == "entity_span_classification":
encoded_inputs["entity_start_positions"] = [0] * entity_difference + encoded_inputs[
"entity_start_positions"
]
encoded_inputs["entity_end_positions"] = [0] * entity_difference + encoded_inputs[
"entity_end_positions"
]
else:
raise ValueError("Invalid padding strategy:" + str(padding_side))
return encoded_inputs
__all__ = ["LukeTokenizer"]
| LukeTokenizer |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 285,
"end": 404
} | class ____(StringEnum):
disabled = "disabled"
no_rois = "no_rois"
label_rules = "label_rules"
| FilterByRoiEnum |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 582954,
"end": 583474
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of EnqueuePullRequest"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "merge_queue_entry")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
merge_queue_entry = sgqlc.types.Field("MergeQueueEntry", graphql_name="mergeQueueEntry")
"""The merge queue entry for the enqueued pull request."""
| EnqueuePullRequestPayload |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 65063,
"end": 66100
} | class ____(ASTTrailingTypeSpec):
def __init__(self, expr: ASTExpression) -> None:
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTrailingTypeSpecDecltype):
return NotImplemented
return self.expr == other.expr
def __hash__(self) -> int:
return hash(self.expr)
def _stringify(self, transform: StringifyTransform) -> str:
return 'decltype(' + transform(self.expr) + ')'
def get_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError
return 'DT' + self.expr.get_id(version) + 'E'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('decltype', 'decltype')
signode += addnodes.desc_sig_punctuation('(', '(')
self.expr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
| ASTTrailingTypeSpecDecltype |
python | walkccc__LeetCode | solutions/1389. Create Target Array in the Given Order/1389.py | {
"start": 0,
"end": 149
} | class ____:
def createTargetArray(self, nums, index):
ans = []
for num, i in zip(nums, index):
ans.insert(i, num)
return ans
| Solution |
python | PrefectHQ__prefect | src/prefect/utilities/annotations.py | {
"start": 1466,
"end": 1908
} | class ____(BaseAnnotation[T]):
"""
Wrapper for states or futures.
Indicates that the upstream run for this input can be failed.
Generally, Prefect will not allow a downstream run to start if any of its inputs
are failed. This annotation allows you to opt into receiving a failed input
downstream.
If the input is from a failed run, the attached exception will be passed to your
function.
"""
| allow_failure |
python | tensorflow__tensorflow | tensorflow/python/trackable/base.py | {
"start": 1489,
"end": 2474
} | class ____(object):
"""A named reference to a trackable object for use with the `Trackable` class.
These references mark named `Trackable` dependencies of a `Trackable` object
and should be created when overriding `Trackable._checkpoint_dependencies`.
Attributes:
name: The local name for this dependency.
ref: The `Trackable` object being referenced.
"""
__slots__ = ("_name", "_ref")
def __init__(self, name, ref):
self._name = name
self._ref = ref
@property
def name(self):
return self._name
@property
def ref(self):
return self._ref
def __iter__(self):
yield self.name
yield self.ref
def __repr__(self):
return f"{self.__class__.__name__}(name={self.name}, ref={self.ref})"
def __eq__(self, o):
if isinstance(o, tuple):
return (self.name, self.ref) == o
elif isinstance(o, TrackableReference):
return self.name == o.name and self.ref == o.ref
else:
return False
| TrackableReference |
python | marshmallow-code__marshmallow | tests/base.py | {
"start": 7039,
"end": 7203
} | class ____: # noqa: N801
@staticmethod
def dumps(val):
return b"{'foo': 42}"
@staticmethod
def loads(val):
return {"foo": 42}
| mockjson |
python | python__mypy | mypy/test/teststubgen.py | {
"start": 57949,
"end": 59251
} | class ____(unittest.TestCase):
def test_is_valid_type(self) -> None:
assert is_valid_type("int")
assert is_valid_type("str")
assert is_valid_type("Foo_Bar234")
assert is_valid_type("foo.bar")
assert is_valid_type("List[int]")
assert is_valid_type("Dict[str, int]")
assert is_valid_type("None")
assert is_valid_type("Literal[26]")
assert is_valid_type("Literal[0x1A]")
assert is_valid_type('Literal["hello world"]')
assert is_valid_type('Literal[b"hello world"]')
assert is_valid_type('Literal[u"hello world"]')
assert is_valid_type("Literal[True]")
assert is_valid_type("Literal[Color.RED]")
assert is_valid_type("Literal[None]")
assert is_valid_type("str | int")
assert is_valid_type("dict[str, int] | int")
assert is_valid_type("tuple[str, ...]")
assert is_valid_type(
'Literal[26, 0x1A, "hello world", b"hello world", u"hello world", True, Color.RED, None]'
)
assert not is_valid_type("foo-bar")
assert not is_valid_type("x->y")
assert not is_valid_type("True")
assert not is_valid_type("False")
assert not is_valid_type("x,y")
assert not is_valid_type("x, y")
| IsValidTypeSuite |
python | getsentry__sentry-python | sentry_sdk/integrations/grpc/server.py | {
"start": 534,
"end": 2466
} | class ____(grpc.ServerInterceptor): # type: ignore
def __init__(self, find_name=None):
# type: (ServerInterceptor, Optional[Callable[[ServicerContext], str]]) -> None
self._find_method_name = find_name or ServerInterceptor._find_name
super().__init__()
def intercept_service(self, continuation, handler_call_details):
# type: (ServerInterceptor, Callable[[HandlerCallDetails], RpcMethodHandler], HandlerCallDetails) -> RpcMethodHandler
handler = continuation(handler_call_details)
if not handler or not handler.unary_unary:
return handler
def behavior(request, context):
# type: (Message, ServicerContext) -> Message
with sentry_sdk.isolation_scope():
name = self._find_method_name(context)
if name:
metadata = dict(context.invocation_metadata())
transaction = sentry_sdk.continue_trace(
metadata,
op=OP.GRPC_SERVER,
name=name,
source=TransactionSource.CUSTOM,
origin=SPAN_ORIGIN,
)
with sentry_sdk.start_transaction(transaction=transaction):
try:
return handler.unary_unary(request, context)
except BaseException as e:
raise e
else:
return handler.unary_unary(request, context)
return grpc.unary_unary_rpc_method_handler(
behavior,
request_deserializer=handler.request_deserializer,
response_serializer=handler.response_serializer,
)
@staticmethod
def _find_name(context):
# type: (ServicerContext) -> str
return context._rpc_event.call_details.method.decode()
| ServerInterceptor |
python | huggingface__transformers | src/transformers/models/data2vec/modular_data2vec_text.py | {
"start": 2533,
"end": 2740
} | class ____(RobertaClassificationHead):
pass
@auto_docstring(
custom_intro="""
Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
| Data2VecTextClassificationHead |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 188276,
"end": 197409
} | class ____(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`_schema.Table` object; it is assigned a set of
:class:`_schema.Column` objects corresponding to those marked with
the :paramref:`_schema.Column.primary_key` flag::
>>> my_table = Table(
... "mytable",
... metadata,
... Column("id", Integer, primary_key=True),
... Column("version_id", Integer, primary_key=True),
... Column("data", String(50)),
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`_schema.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table(
"mytable",
metadata,
Column("id", Integer),
Column("version_id", Integer),
Column("data", String(50)),
PrimaryKeyConstraint("id", "version_id", name="mytable_pk"),
)
The two styles of column-specification should generally not be mixed.
An warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is intended to
be backwards compatible with previous behavior.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`_schema.Table` based on the
flags::
my_table = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("version_id", Integer, primary_key=True),
Column("data", String(50)),
PrimaryKeyConstraint(name="mytable_pk", mssql_clustered=True),
)
"""
__visit_name__ = "primary_key_constraint"
def __init__(
self,
*columns: _DDLColumnArgument,
name: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
_implicit_generated: bool = False,
**dialect_kw: Any,
) -> None:
self._implicit_generated = _implicit_generated
super().__init__(
*columns,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw,
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
super()._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self # type: ignore
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if (
self._columns
and table_pks
and set(table_pks) != set(self._columns)
):
# black could not format these inline
table_pk_str = ", ".join("'%s'" % c.name for c in table_pks)
col_str = ", ".join("'%s'" % c.name for c in self._columns)
util.warn(
f"Table '{table.name}' specifies columns "
f"{table_pk_str} as "
f"primary_key=True, "
f"not matching locally specified columns {col_str}; "
f"setting the "
f"current primary key columns to "
f"{col_str}. "
f"This warning "
f"may become an exception in a future release"
)
table_pks[:] = []
for c in self._columns:
c.primary_key = True
if c._user_defined_nullable is NULL_UNSPECIFIED:
c.nullable = False
if table_pks:
self._columns.extend(table_pks)
def _reload(self, columns: Iterable[Column[Any]]) -> None:
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`_schema.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self._columns.extend(columns)
PrimaryKeyConstraint._autoincrement_column._reset(self) # type: ignore
self._set_parent_with_dispatch(self.table)
def _replace(self, col: Column[Any]) -> None:
PrimaryKeyConstraint._autoincrement_column._reset(self) # type: ignore
self._columns.replace(col)
self.dispatch._sa_event_column_added_to_pk_constraint(self, col)
@property
def columns_autoinc_first(self) -> List[Column[Any]]:
autoinc = self._autoincrement_column
if autoinc is not None:
return [autoinc] + [c for c in self._columns if c is not autoinc]
else:
return list(self._columns)
@util.ro_memoized_property
def _autoincrement_column(self) -> Optional[Column[int]]:
def _validate_autoinc(col: Column[Any], autoinc_true: bool) -> bool:
if col.type._type_affinity is not None and issubclass(
col.type._type_affinity, type_api.NUMERICTYPE._type_affinity
):
scale = col.type.scale # type: ignore[attr-defined]
if scale != 0 and autoinc_true:
raise exc.ArgumentError(
f"Column type {col.type} with non-zero scale "
f"{scale} on column '{col}' is not "
f"compatible with autoincrement=True"
)
elif not autoinc_true:
return False
elif col.type._type_affinity is None or not issubclass(
col.type._type_affinity, type_api.INTEGERTYPE._type_affinity
):
if autoinc_true:
raise exc.ArgumentError(
f"Column type {col.type} on column '{col}' is not "
f"compatible with autoincrement=True"
)
else:
return False
elif (
col.default is not None
and not isinstance(col.default, Sequence)
and not autoinc_true
):
return False
elif (
col.server_default is not None
and not isinstance(col.server_default, Identity)
and not autoinc_true
):
return False
elif col.foreign_keys and col.autoincrement not in (
True,
"ignore_fk",
):
return False
return True
if len(self._columns) == 1:
col = list(self._columns)[0]
if col.autoincrement is True:
_validate_autoinc(col, True)
return col
elif col.autoincrement in (
"auto",
"ignore_fk",
) and _validate_autoinc(col, False):
return col
else:
return None
else:
autoinc = None
for col in self._columns:
if col.autoincrement is True:
_validate_autoinc(col, True)
if autoinc is not None:
raise exc.ArgumentError(
f"Only one Column may be marked "
f"autoincrement=True, found both "
f"{col.name} and {autoinc.name}."
)
else:
autoinc = col
return autoinc
| PrimaryKeyConstraint |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/react/base.py | {
"start": 5528,
"end": 6321
} | class ____(AgentExecutor):
"""[Deprecated] Chain that implements the ReAct paper."""
def __init__(self, llm: BaseLanguageModel, docstore: Docstore, **kwargs: Any):
"""Initialize with the LLM and a docstore."""
docstore_explorer = DocstoreExplorer(docstore)
tools = [
Tool(
name="Search",
func=docstore_explorer.search,
description="Search for a term in the docstore.",
),
Tool(
name="Lookup",
func=docstore_explorer.lookup,
description="Lookup a term in the docstore.",
),
]
agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)
super().__init__(agent=agent, tools=tools, **kwargs)
| ReActChain |
python | ray-project__ray | python/ray/serve/tests/unit/test_config.py | {
"start": 32829,
"end": 36524
} | class ____:
def test_empty_fields(self):
"""Test _proto_to_dict() to deserialize protobuf with empty fields"""
proto = DeploymentConfigProto()
result = _proto_to_dict(proto)
# Defaults are filled.
assert result["num_replicas"] == 0
assert result["max_ongoing_requests"] == 0
assert result["user_config"] == b""
assert result["user_configured_option_names"] == []
# Nested profobufs don't exist.
assert "autoscaling_config" not in result
def test_non_empty_fields(self):
"""Test _proto_to_dict() to deserialize protobuf with non-empty fields"""
num_replicas = 111
max_ongoing_requests = 222
proto = DeploymentConfigProto(
num_replicas=num_replicas,
max_ongoing_requests=max_ongoing_requests,
)
result = _proto_to_dict(proto)
# Fields with non-empty values are filled correctly.
assert result["num_replicas"] == num_replicas
assert result["max_ongoing_requests"] == max_ongoing_requests
# Empty fields are continue to be filled with default values.
assert result["user_config"] == b""
def test_nested_protobufs(self):
"""Test _proto_to_dict() to deserialize protobuf with nested protobufs"""
num_replicas = 111
max_ongoing_requests = 222
min_replicas = 333
proto = DeploymentConfigProto(
num_replicas=num_replicas,
max_ongoing_requests=max_ongoing_requests,
autoscaling_config=AutoscalingConfigProto(
min_replicas=min_replicas,
),
)
result = _proto_to_dict(proto)
# Non-empty field is filled correctly.
assert result["num_replicas"] == num_replicas
assert result["max_ongoing_requests"] == max_ongoing_requests
# Nested protobuf is filled correctly.
assert result["autoscaling_config"]["min_replicas"] == min_replicas
def test_repeated_field(self):
"""Test _proto_to_dict() to deserialize protobuf with repeated field"""
user_configured_option_names = ["foo", "bar"]
config = DeploymentConfig.from_default(
user_configured_option_names=user_configured_option_names,
)
proto_bytes = config.to_proto_bytes()
proto = DeploymentConfigProto.FromString(proto_bytes)
result = _proto_to_dict(proto)
# Repeated field is filled correctly as list.
assert set(result["user_configured_option_names"]) == set(
user_configured_option_names
)
assert isinstance(result["user_configured_option_names"], list)
def test_enum_field(self):
"""Test _proto_to_dict() to deserialize protobuf with enum field"""
proto = DeploymentConfigProto(
deployment_language=DeploymentLanguage.JAVA,
)
result = _proto_to_dict(proto)
# Enum field is filled correctly.
assert result["deployment_language"] == DeploymentLanguage.JAVA
def test_optional_field(self):
"""Test _proto_to_dict() to deserialize protobuf with optional field"""
min_replicas = 1
proto = AutoscalingConfigProto(
min_replicas=min_replicas,
)
result = _proto_to_dict(proto)
# Non-empty field is filled correctly.
assert result["min_replicas"] == 1
# Empty field is filled correctly.
assert result["max_replicas"] == 0
# Optional field should not be filled.
assert "initial_replicas" not in result
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestProtoToDict |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 98837,
"end": 104261
class ____:
    """Tests for ``stats.scoreatpercentile``: basic percentiles, the
    ``interpolation_method`` options ('fraction', 'lower', 'higher'),
    ``limit`` ranges, sequence-valued percentiles, ``axis`` handling,
    invalid arguments, and empty input."""

    def setup_method(self):
        # Small fixture lists with mixed signs (a3 is float-valued).
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]

    def test_basic(self):
        # Evenly spaced data: 0th/100th/50th percentiles hit min/max/median.
        x = arange(8) * 0.5
        assert_equal(stats.scoreatpercentile(x, 0), 0.)
        assert_equal(stats.scoreatpercentile(x, 100), 3.5)
        assert_equal(stats.scoreatpercentile(x, 50), 1.75)

    def test_fraction(self):
        scoreatperc = stats.scoreatpercentile

        # Test defaults
        assert_equal(scoreatperc(list(range(10)), 50), 4.5)
        assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
        assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)

        # explicitly specify interpolation_method 'fraction' (the default);
        # results must match the default-call results above.
        assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),
                     4.5)
        assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
                                 interpolation_method='fraction'),
                     4.5)
        assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
                                 interpolation_method='fraction'),
                     4.5)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
                                 interpolation_method='fraction'),
                     55)
        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
                                 interpolation_method='fraction'),
                     5.5)

    def test_lower_higher(self):
        scoreatperc = stats.scoreatpercentile

        # interpolation_method 'lower'/'higher': snap to the data value
        # below/above the fractional rank instead of interpolating.
        assert_equal(scoreatperc(list(range(10)), 50,
                                 interpolation_method='lower'), 4)
        assert_equal(scoreatperc(list(range(10)), 50,
                                 interpolation_method='higher'), 5)
        assert_equal(scoreatperc(list(range(10)), 50, (2,7),
                                 interpolation_method='lower'), 4)
        assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
                                 interpolation_method='higher'), 5)
        assert_equal(scoreatperc(list(range(100)), 50, (1,8),
                                 interpolation_method='lower'), 4)
        assert_equal(scoreatperc(list(range(100)), 50, (1,8),
                                 interpolation_method='higher'), 5)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
                                 interpolation_method='lower'), 10)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
                                 interpolation_method='higher'), 100)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
                                 interpolation_method='lower'), 1)
        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
                                 interpolation_method='higher'), 10)

    def test_sequence_per(self):
        # A sequence of percentiles returns an ndarray of scores.
        x = arange(8) * 0.5
        expected = np.array([0, 3.5, 1.75])
        res = stats.scoreatpercentile(x, [0, 100, 50])
        assert_allclose(res, expected)
        assert_(isinstance(res, np.ndarray))
        # Test with ndarray. Regression test for gh-2861
        assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
                        expected)
        # Also test combination of 2-D array, axis not None and array-like per
        res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
                                       np.array([0, 1, 100, 100]), axis=1)
        expected2 = array([[0, 4, 8],
                           [0.03, 4.03, 8.03],
                           [3, 7, 11],
                           [3, 7, 11]])
        assert_allclose(res2, expected2)

    def test_axis(self):
        scoreatperc = stats.scoreatpercentile
        x = arange(12).reshape(3, 4)

        # axis=None (default) flattens the input.
        assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])

        r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
        assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)

        r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
        assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)

        x = array([[1, 1, 1],
                   [1, 1, 1],
                   [4, 4, 3],
                   [1, 1, 1],
                   [1, 1, 1]])
        # Scalar percentile with axis=None yields a 0-d result.
        score = stats.scoreatpercentile(x, 50)
        assert_equal(score.shape, ())
        assert_equal(score, 1.0)
        # Along axis=0 one score per column.
        score = stats.scoreatpercentile(x, 50, axis=0)
        assert_equal(score.shape, (3,))
        assert_equal(score, [1, 1, 1])

    def test_exception(self):
        # Unknown interpolation method and out-of-range percentiles raise.
        assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
                      interpolation_method='foobar')
        assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
        assert_raises(ValueError, stats.scoreatpercentile, [1], -1)

    def test_empty(self):
        # Empty input produces NaN(s) rather than raising.
        assert_equal(stats.scoreatpercentile([], 50), np.nan)
        assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
        assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
@make_xp_test_case(stats.mode)
| TestScoreatpercentile |
python | realpython__materials | inheritance-and-composition/choosing/hr.py | {
"start": 1026,
"end": 1240
class ____(PayrollPolicy):
    """Payroll policy that pays a fixed weekly salary regardless of hours."""

    def __init__(self, weekly_salary):
        # weekly_salary: fixed amount paid per payroll period.
        super().__init__()
        self.weekly_salary = weekly_salary

    def calculate_payroll(self):
        """Return the fixed weekly salary for this pay period."""
        return self.weekly_salary
| SalaryPolicy |
python | dask__distributed | distributed/http/scheduler/api.py | {
"start": 1719,
"end": 2219
class ____(RequestHandler):
    """HTTP endpoint that reports the scheduler's adaptive worker target."""

    def get(self):
        """Respond with ``{"workers": <desired count>}`` as JSON.

        Any failure while computing or serializing the target is answered
        with a 500 status and a generic JSON error body.
        """
        self.set_header("Content-Type", "application/json")
        scheduler = self.server
        try:
            payload = json.dumps({"workers": scheduler.adaptive_target()})
            self.write(payload)
        except Exception as exc:
            # Boundary handler: surface the failure as an HTTP error, never
            # let the exception propagate out of the request cycle.
            self.set_status(500, str(exc))
            self.write(json.dumps({"Error": "Internal Server Error"}))
| AdaptiveTargetHandler |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 52196,
"end": 53007
class ____(Operation):
    """Symbolic wrapper for element-wise bit-wise OR (``|``)."""

    def call(self, x, y):
        # Eager path: delegate directly to the backend implementation.
        return backend.numpy.bitwise_or(x, y)

    def compute_output_spec(self, x, y):
        # Result dtype follows promotion rules for the two input dtypes.
        dtype = dtypes.result_type(x.dtype, y.dtype)
        # NOTE(review): output shape is taken from ``x`` only — broadcasting
        # against ``y``'s shape is not reflected here; confirm intended.
        return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.bitwise_or", "keras.ops.numpy.bitwise_or"])
def bitwise_or(x, y):
    """Compute the bit-wise OR of two arrays element-wise.

    Computes the bit-wise OR of the underlying binary representation of the
    integers in the input arrays. This ufunc implements the C/Python operator
    `|`.

    Args:
        x: Input integer tensor.
        y: Input integer tensor.

    Returns:
        Result tensor.
    """
    # Build a symbolic op when any input is a placeholder tensor; otherwise
    # compute eagerly via the backend.
    if any_symbolic_tensors((x, y)):
        return BitwiseOr().symbolic_call(x, y)
    return backend.numpy.bitwise_or(x, y)
| BitwiseOr |
python | bokeh__bokeh | src/bokeh/command/subcommands/settings.py | {
"start": 6860,
"end": 8178
class ____:
    """Buckets produced while resolving user-supplied setting names.

    NOTE(review): the ``field(default_factory=...)`` defaults imply this
    class carries a ``@dataclass`` decorator (not visible in this chunk) —
    confirm against the full file.
    """

    # Names that matched a known setting exactly, plus substring expansions.
    exact_matches: list[str] = field(default_factory=list)
    # Input name -> close spelling suggestions (difflib fuzzy matches).
    fuzzy_matches: dict[str, list[str]] = field(default_factory=dict)
    # Input names with no exact, substring, or fuzzy match at all.
    not_found: list[str] = field(default_factory=list)
def resolve_setting_names(input_names: list[str], all_settings: dict[str, Any]) -> ResolutionResult:
    """Resolve user-supplied setting names into matches against all_settings.

    Each requested name is classified, in order of preference, as an exact
    key, a case-insensitive substring of one or more keys (all of which are
    recorded as exact matches), a fuzzy near-miss with suggestions, or not
    found at all.
    """
    result = ResolutionResult()
    for requested in input_names:
        # Exact key hit wins outright.
        if requested in all_settings:
            result.exact_matches.append(requested)
            continue
        # Case-insensitive substring search across all known keys.
        needle = requested.lower()
        partial = [key for key in all_settings if needle in key.lower()]
        if partial:
            result.exact_matches.extend(partial)
            continue
        # Fall back to fuzzy spelling suggestions; give up if none are close.
        suggestions = get_close_matches(requested, all_settings.keys(), n=3, cutoff=0.6)
        if suggestions:
            result.fuzzy_matches[requested] = suggestions
        else:
            result.not_found.append(requested)
    return result
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ResolutionResult |
python | openai__openai-python | src/openai/types/evals/create_eval_completions_run_data_source_param.py | {
"start": 1594,
"end": 1848
class ____(TypedDict, total=False):
    """JSONL evaluation data supplied inline rather than by file reference."""

    content: Required[Iterable[SourceFileContentContent]]
    """The content of the jsonl file."""

    type: Required[Literal["file_content"]]
    """The type of jsonl source. Always `file_content`."""
| SourceFileContent |
python | dask__distributed | distributed/spans.py | {
"start": 16254,
"end": 22426
class ____:
    """Scheduler extension for spans support"""

    scheduler: Scheduler

    #: All Span objects by id
    spans: dict[str, Span]

    #: Only the spans that don't have any parents, sorted by creation time.
    #: This is a convenience helper structure to speed up searches.
    root_spans: list[Span]

    #: All spans, keyed by their full name and sorted by creation time.
    #: This is a convenience helper structure to speed up searches.
    spans_search_by_name: defaultdict[tuple[str, ...], list[Span]]

    #: All spans, keyed by the individual tags that make up their name and sorted by
    #: creation time.
    #: This is a convenience helper structure to speed up searches.
    #:
    #: See Also
    #: --------
    #: find_by_tags
    #: merge_by_tags
    spans_search_by_tag: defaultdict[str, list[Span]]

    def __init__(self, scheduler: Scheduler):
        self.scheduler = scheduler
        self.spans = {}
        self.root_spans = []
        self.spans_search_by_name = defaultdict(list)
        self.spans_search_by_tag = defaultdict(list)

    def observe_tasks(
        self,
        tss: Iterable[scheduler_module.TaskState],
        code: tuple[SourceCode, ...],
        span_metadata: SpanMetadata,
    ) -> dict[Key, dict]:
        """Acknowledge the existence of runnable tasks on the scheduler. These may
        either be new tasks, tasks that were previously unrunnable, or tasks that were
        already fed into this method already.

        Attach newly observed tasks to either the desired span or to ("default", ).
        Update TaskGroup.span_id and wipe TaskState.annotations["span"].

        Returns
        -------
        Updated 'span' annotations: {key: {"name": (..., ...), "ids": (..., ...)}}
        """
        out = {}
        # Lazily created only if some task has no explicit span annotation.
        default_span = None

        for ts in tss:
            if ts.annotations is None:
                ts.annotations = dict()
            # You may have different tasks belonging to the same TaskGroup but to
            # different spans. If that happens, arbitrarily force everything onto the
            # span of the earliest encountered TaskGroup.
            tg = ts.group
            if tg.span_id:
                # Group already pinned to a span: reuse it.
                span = self.spans[tg.span_id]
            else:
                ann = ts.annotations.get("span")
                if ann:
                    # Client-side annotation names the span; create on demand.
                    span = self._ensure_span(ann["name"], ann["ids"])
                else:
                    if not default_span:
                        default_span = self._ensure_default_span()
                    span = default_span
                tg.span_id = span.id
                span.groups.add(tg)
                if code:
                    span._code[code] = None
                if span_metadata:
                    span.add_metadata(span_metadata)

            # The span may be completely different from the one referenced by the
            # annotation, due to the TaskGroup collision issue explained above.
            if ann := span.annotation:
                ts.annotations["span"] = out[ts.key] = ann
            else:
                ts.annotations.pop("span", None)
        return out

    def _ensure_default_span(self) -> Span:
        """Return the currently active default span, or create one if the previous one
        terminated. In other words, do not reuse the previous default span if all tasks
        that were not explicitly annotated with :func:`spans` on the client side are
        finished.
        """
        defaults = self.spans_search_by_name["default",]
        if defaults and not defaults[-1].done:
            return defaults[-1]
        return self._ensure_span(("default",), (str(uuid.uuid4()),))

    def _ensure_span(self, name: tuple[str, ...], ids: tuple[str, ...]) -> Span:
        """Create Span if it doesn't exist and return it"""
        try:
            # Fast path: span already registered under its last id.
            return self.spans[ids[-1]]
        except KeyError:
            pass

        assert len(name) == len(ids)
        assert len(name) > 0

        # Create any missing ancestors first, from the root downwards; the
        # last iteration's result becomes this span's parent.
        parent = None
        for i in range(1, len(name)):
            parent = self._ensure_span(name[:i], ids[:i])

        span = Span(
            name=name,
            id_=ids[-1],
            parent=parent,
            total_nthreads_history=self.scheduler.total_nthreads_history,
        )
        # Register in all lookup structures.
        self.spans[span.id] = span
        self.spans_search_by_name[name].append(span)
        for tag in name:
            self.spans_search_by_tag[tag].append(span)
        if parent:
            parent.children.append(span)
        else:
            self.root_spans.append(span)

        return span

    def find_by_tags(self, *tags: str) -> Iterator[Span]:
        """Yield all spans that contain any of the given tags.
        When a tag is shared both by a span and its (grand)children, only return the
        parent.
        """
        # Bucket candidates by depth (length of their full name) so that
        # shallower spans are seen before their descendants.
        by_level = defaultdict(list)
        for tag in tags:
            for sp in self.spans_search_by_tag[tag]:
                by_level[len(sp.name)].append(sp)

        seen = set()
        for _, level in sorted(by_level.items()):
            seen.update(level)
            for sp in level:
                # Skip spans whose parent was already yielded at a shallower level.
                if sp.parent not in seen:
                    yield sp

    def merge_all(self) -> Span:
        """Return a synthetic Span which is the sum of all spans"""
        return Span.merge(*self.root_spans)

    def merge_by_tags(self, *tags: str) -> Span:
        """Return a synthetic Span which is the sum of all spans containing the given
        tags
        """
        return Span.merge(*self.find_by_tags(*tags))

    def heartbeat(
        self, ws: scheduler_module.WorkerState, data: dict[tuple[Hashable, ...], float]
    ) -> None:
        """Triggered by :meth:`SpansWorkerExtension.heartbeat`.

        Populate :meth:`Span.cumulative_worker_metrics` with data from the worker.

        See Also
        --------
        SpansWorkerExtension.heartbeat
        Span.cumulative_worker_metrics
        """
        # Each key embeds the span id at position 1; accumulate the metric on
        # that span with the id stripped from the key.
        for (context, span_id, *other), v in data.items():
            assert isinstance(span_id, str)
            span = self.spans[span_id]
            span._cumulative_worker_metrics[(context, *other)] += v
| SpansSchedulerExtension |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.