language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 713505,
"end": 714161
} | class ____(sgqlc.types.Type, Node, UniformResourceLocatable):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "closable", "closer", "created_at", "state_reason")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
closable = sgqlc.types.Field(
sgqlc.types.non_null(Closable), graphql_name="closable"
)
closer = sgqlc.types.Field("Closer", graphql_name="closer")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
state_reason = sgqlc.types.Field(IssueStateReason, graphql_name="stateReason")
| ClosedEvent |
python | walkccc__LeetCode | solutions/785. Is Graph Bipartite?/785-2.py | {
"start": 24,
"end": 79
} | class ____(Enum):
WHITE = 0
RED = 1
GREEN = 2
| Color |
python | huggingface__transformers | src/transformers/models/bart/modeling_bart.py | {
"start": 18676,
"end": 19481
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
@auto_docstring
| BartClassificationHead |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_twodim_base.py | {
"start": 17543,
"end": 18692
} | class ____(TestCase):
def test_basic(self):
c = np.array([0, 1, -2, 3])
v = vander(c)
powers = np.array(
[[0, 0, 0, 0, 1], [1, 1, 1, 1, 1], [16, -8, 4, -2, 1], [81, 27, 9, 3, 1]]
)
# Check default value of N:
assert_array_equal(v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
m = powers.shape[1]
for n in range(6):
v = vander(c, N=n)
assert_array_equal(v, powers[:, m - n : m])
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
v = vander(c)
expected = np.array([[121, 11, 1], [144, -12, 1], [169, 13, 1]])
assert_array_equal(v, expected)
c = array([1.0 + 1j, 1.0 - 1j])
v = vander(c, N=3)
expected = np.array([[2j, 1 + 1j, 1], [-2j, 1 - 1j, 1]])
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
# assert_array_almost_equal).
assert_array_equal(v, expected)
if __name__ == "__main__":
run_tests()
| TestVander |
python | ray-project__ray | release/llm_tests/serve/benchmark/benchmark_vllm.py | {
"start": 972,
"end": 8683
} | class ____:
HEADER = "\033[95m"
SERVER = "\033[94m"
CLIENT = "\033[92m"
WARNING = "\033[93m"
ERROR = "\033[91m"
ENDC = "\033[0m"
@staticmethod
def log_server(msg):
print(
f"{ColoredLogger.SERVER}[SERVER {get_timestamp()}] {msg}{ColoredLogger.ENDC}"
)
@staticmethod
def log_client(msg):
print(
f"{ColoredLogger.CLIENT}[CLIENT {get_timestamp()}] {msg}{ColoredLogger.ENDC}"
)
@staticmethod
def log_error(msg):
print(
f"{ColoredLogger.ERROR}[ERROR {get_timestamp()}] {msg}{ColoredLogger.ENDC}"
)
def stream_process_output(process, logger_func, stop_event, is_error: bool = False):
while not stop_event.is_set():
if is_error:
output_line = process.stderr.readline()
else:
output_line = process.stdout.readline()
if output_line:
logger_func(output_line.strip())
elif process.poll() is not None:
break
def get_vllm_cli_args(llm_config):
engine_kwargs = llm_config["engine_kwargs"]
# When we define tokenizer_pool size, vllm, by default, uses Ray
# that breaks the assumption that this script should not use ray
# TODO (Kourosh): When the job issue with non driver ray
# subprocesses are resolved we can remove these constraints
engine_kwargs.pop("tokenizer_pool_extra_config", None)
engine_kwargs.pop("tokenizer_pool_size", None)
engine_kwargs.pop("tokenizer_pool_type", None)
cli_args = ["--model", llm_config["model_loading_config"]["model_id"]]
for key, value in engine_kwargs.items():
cli_args.append("--" + key.replace("_", "-"))
if isinstance(value, dict):
cli_args.append(json.dumps(value))
elif isinstance(value, bool):
pass
else:
cli_args.append(str(value))
cli_args.extend(
["--tensor-parallel-size", str(engine_kwargs["tensor_parallel_size"])]
)
if "max_model_len" in engine_kwargs:
cli_args.extend(["--max-model-len", str(engine_kwargs["max_model_len"])])
return cli_args
def get_ray_options(llm_config):
num_gpus = llm_config["tensor_parallelism"]["degree"]
acc_type = llm_config["accelerator_type"]
resources = {f"accelerator_type:{acc_type}": 0.001}
return {"num_gpus": num_gpus, "resources": resources}
def start_vllm_process(vllm_cli_args):
server_process = subprocess.Popen(
["python", "-m", "vllm.entrypoints.openai.api_server"] + vllm_cli_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True, # Return strings from stdout/stderr
bufsize=1, # Line buffered
env=os.environ.copy(),
)
return server_process
def run_vllm_benchmark(vllm_cli_args):
stop_event = threading.Event()
results = {}
try:
# Start the server process
ColoredLogger.log_server("Starting vLLM server...")
server_process = start_vllm_process(vllm_cli_args)
# Start server output streaming thread
server_thread = threading.Thread(
target=stream_process_output,
args=(server_process, ColoredLogger.log_server, stop_event),
daemon=True, # Daemonize the thread so it stops when the main thread stops.
)
server_thread.start()
# Start server error streaming thread
server_error_thread = threading.Thread(
target=stream_process_output,
args=(server_process, ColoredLogger.log_server, stop_event),
kwargs={"is_error": True},
daemon=True, # Daemonize the thread so it stops when the main thread stops.
)
server_error_thread.start()
# Wait for server to be ready
server_ready = False
start_time = time.time()
timeout = 300 # 5 minutes timeout
while not server_ready and time.time() - start_time < timeout:
if server_process.poll() is not None:
raise Exception("Server process terminated unexpectedly")
# Check if server is responding
try:
import requests
response = requests.get("http://localhost:8000/health")
if response.status_code == 200:
server_ready = True
ColoredLogger.log_server("Server is ready!")
except Exception:
time.sleep(1)
continue
if not server_ready:
raise TimeoutError("Server failed to start within timeout period")
# Start benchmark
ColoredLogger.log_client("Starting benchmark...")
results = run_bm(
api_url="http://localhost:8000",
api_key="NONE",
concurrency=[1, 2, 4, 8, 16, 32],
run_time="1m",
prompt_tokens=256,
max_tokens=64,
stream=False,
summary_file="./results.csv",
)
print(
"Writing final result to AWS Firehose:",
json.dumps(results, indent=4, sort_keys=True),
sep="\n",
)
ColoredLogger.log_client("Benchmark completed successfully")
except Exception as e:
ColoredLogger.log_error(f"Error during benchmark: {str(e)}")
raise
finally:
# Clean up
if "server_process" in locals():
ColoredLogger.log_server("Shutting down server...")
server_process.terminate()
server_process.wait(timeout=THREAD_CLEANUP_TIMEOUT_S)
stop_event.set()
# Wait for all threads to complete
if "server_thread" in locals():
server_thread.join(timeout=THREAD_CLEANUP_TIMEOUT_S)
if "server_error_thread" in locals():
server_error_thread.join(timeout=THREAD_CLEANUP_TIMEOUT_S)
# Wait some time to make sure everyting is cleanend up.
time.sleep(5)
return results
def upload_results_to_s3(s3_path, results, service_metadata):
if any(result is None for result in results):
raise ValueError(
"Found None results during benchmarking. " "This should not have happened."
)
data_to_write = [{**result, **service_metadata} for result in results]
write_to_s3(data_to_write, s3_path)
def main(pargs):
llm_config = get_llm_config(pargs.llm_config)
vllm_cli_args = get_vllm_cli_args(llm_config)
results = run_vllm_benchmark(vllm_cli_args)
tag = f"{llm_config['accelerator_type']}-TP{llm_config['engine_kwargs']['tensor_parallel_size']}"
service_metadata = {
"cloud_name": "",
"service_name": "",
"py_version": f"py{sys.version_info.major}{sys.version_info.minor}",
"tag": tag,
"vllm_engine": f"V{os.environ.get('VLLM_USE_V1', '')}",
}
# Post the results to S3
if results:
print(
"Writing final result to AWS S3:",
json.dumps(results, indent=4, sort_keys=True),
sep="\n",
)
upload_results_to_s3(pargs.remote_result_path, results, service_metadata)
else:
raise ValueError(
"For some reason the benchmarking results are empty. Something is wrong."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--llm-config",
type=str,
required=True,
default="The LLM config to start vLLM engine",
)
parser.add_argument(
"--remote-result-path",
type=str,
required=True,
help="The remote s3 path to store intermediate results on.",
)
main(parser.parse_args())
| ColoredLogger |
python | astropy__astropy | astropy/coordinates/builtin_frames/ecliptic.py | {
"start": 5852,
"end": 6978
} | class ____(BaseEclipticFrame):
"""
Barycentric true ecliptic coordinates. These origin of the coordinates are the
barycenter of the solar system, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000, doc="The equinox time")
doc_footer_helio = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Sun. Defaults to J2000.
"""
@format_doc(
base_doc,
components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio,
)
| BarycentricTrueEcliptic |
python | getsentry__sentry | src/sentry/api/serializers/models/groupsearchview.py | {
"start": 893,
"end": 3251
} | class ____(Serializer):
def __init__(self, *args, **kwargs):
self.organization = kwargs.pop("organization", None)
super().__init__(*args, **kwargs)
def get_attrs(self, item_list, user, **kwargs) -> MutableMapping[Any, Any]:
attrs: MutableMapping[Any, Any] = {}
last_visited_views = GroupSearchViewLastVisited.objects.filter(
organization=self.organization,
user_id=user.id,
group_search_view_id__in=[item.id for item in item_list],
)
user_starred_view_ids = set(
GroupSearchViewStarred.objects.filter(
organization=self.organization,
user_id=user.id,
).values_list("group_search_view_id", flat=True)
)
last_visited_map = {lv.group_search_view_id: lv for lv in last_visited_views}
serialized_users = {
user["id"]: user
for user in user_service.serialize_many(
filter={"user_ids": [view.user_id for view in item_list if view.user_id]},
as_user=user,
)
}
for item in item_list:
last_visited = last_visited_map.get(item.id, None)
attrs[item] = {}
if last_visited:
attrs[item]["last_visited"] = last_visited.last_visited
attrs[item]["starred"] = item.id in user_starred_view_ids
attrs[item]["stars"] = getattr(item, "popularity", 0)
attrs[item]["created_by"] = serialized_users.get(str(item.user_id))
return attrs
def serialize(self, obj, attrs, user, **kwargs) -> GroupSearchViewSerializerResponse:
projects = [-1] if obj.is_all_projects else list(obj.projects.values_list("id", flat=True))
return {
"id": str(obj.id),
"createdBy": attrs.get("created_by"),
"name": obj.name,
"query": obj.query,
"querySort": obj.query_sort,
"projects": projects,
"environments": obj.environments,
"timeFilters": obj.time_filters,
"lastVisited": attrs.get("last_visited", None),
"starred": attrs.get("starred", False),
"stars": attrs.get("stars", 0),
"dateCreated": obj.date_added,
"dateUpdated": obj.date_updated,
}
| GroupSearchViewSerializer |
python | PrefectHQ__prefect | src/prefect/server/utilities/schemas/bases.py | {
"start": 1871,
"end": 7151
} | class ____(BaseModel):
"""A base pydantic.BaseModel for all Prefect schemas and pydantic models.
As the basis for most Prefect schemas, this base model ignores extra
fields that are passed to it at instantiation. Because adding new fields to
API payloads is not considered a breaking change, this ensures that any
Prefect client loading data from a server running a possibly-newer version
of Prefect will be able to process those new fields gracefully.
"""
_reset_fields: ClassVar[set[str]] = set()
model_config: ClassVar[ConfigDict] = ConfigDict(
ser_json_timedelta="float",
extra="ignore",
ignored_types=(PrefectDescriptorBase,),
)
def __eq__(self, other: Any) -> bool:
"""Equality operator that ignores the resettable fields of the PrefectBaseModel.
NOTE: this equality operator will only be applied if the PrefectBaseModel is
the left-hand operand. This is a limitation of Python.
"""
copy_dict = self.model_dump(exclude=self._reset_fields)
if isinstance(other, PrefectBaseModel):
return copy_dict == other.model_dump(exclude=other._reset_fields)
if isinstance(other, BaseModel):
return copy_dict == other.model_dump()
else:
return copy_dict == other
def __rich_repr__(self) -> "RichReprResult":
# Display all of the fields in the model if they differ from the default value
for name, field in type(self).model_fields.items():
value = getattr(self, name)
# Simplify the display of some common fields
if isinstance(value, UUID):
value = str(value)
elif isinstance(value, datetime.datetime):
value = (
value.isoformat()
if name == "timestamp"
else human_friendly_diff(value)
)
yield name, value, field.get_default()
def reset_fields(self: Self) -> Self:
"""
Reset the fields of the model that are in the `_reset_fields` set.
Returns:
PrefectBaseModel: A new instance of the model with the reset fields.
"""
return self.model_copy(
update={
field: type(self)
.model_fields[field]
.get_default(call_default_factory=True)
for field in self._reset_fields
}
)
def model_dump_for_orm(
self,
*,
include: Optional["IncEx"] = None,
exclude: Optional["IncEx"] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> dict[str, Any]:
"""
Prefect extension to `BaseModel.model_dump`. Generate a Python dictionary
representation of the model suitable for passing to SQLAlchemy model
constructors, `INSERT` statements, etc. The critical difference here is that
this method will return any nested BaseModel objects as `BaseModel` instances,
rather than serialized Python dictionaries.
Accepts the standard Pydantic `model_dump` arguments, except for `mode` (which
is always "python"), `round_trip`, and `warnings`.
Usage docs: https://docs.pydantic.dev/2.6/concepts/serialization/#modelmodel_dump
Args:
include: A list of fields to include in the output.
exclude: A list of fields to exclude from the output.
by_alias: Whether to use the field's alias in the dictionary key if defined.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default
value.
exclude_none: Whether to exclude fields that have a value of `None`.
Returns:
A dictionary representation of the model, suitable for passing
to SQLAlchemy model constructors, INSERT statements, etc.
"""
# TODO: this could be optimized by excluding any fields that we know we are
# going to replace because they are `BaseModel` instances. This would involve
# understanding which fields would be included or excluded by model_dump so we
# could instruct Pydantic to exclude them up front.
deep = self.model_dump(
mode="python",
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
context={"for_orm": True},
)
for k, v in self:
if k in deep and isinstance(v, BaseModel):
deep[k] = v
return deep
def _ensure_fields_required(field_names: list[str], schema: JsonDict) -> None:
for field_name in field_names:
if "required" not in schema:
schema["required"] = []
if (
(required := schema.get("required"))
and isinstance(required, list)
and field_name not in required
):
required.append(field_name)
| PrefectBaseModel |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_filter_column.py | {
"start": 301,
"end": 880
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_filter_column() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_filter_column(self):
"""Test the _write_filter_column() method"""
self.worksheet._write_filter_column(0, 1, ["East"])
exp = """<filterColumn colId="0"><filters><filter val="East"/></filters></filterColumn>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteFilterColumn |
python | huggingface__transformers | tests/models/vivit/test_modeling_vivit.py | {
"start": 12561,
"end": 14852
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return VivitImageProcessor() if is_vision_available() else None
@slow
def test_inference_for_video_classification(self):
model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 400))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [-0.9498, 2.7971, -1.4049, 0.1024, -1.8353],
("cuda", 8): [-0.9498, 2.7971, -1.4049, 0.1025, -1.8353],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
# Vivit models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)
image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
video = prepare_video()
inputs = image_processor(
video, size={"shortest_edge": 480}, crop_size={"height": 232, "width": 232}, return_tensors="pt"
)
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(pixel_values, interpolate_pos_encoding=True)
# verify the logits shape
expected_shape = torch.Size((1, 3137, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
| VivitModelIntegrationTest |
python | wandb__wandb | tests/unit_tests/test_launch/test_runner/test_vertex.py | {
"start": 301,
"end": 7313
} | class ____:
"""Mock of the CustomJob class from the Vertex SDK.
This is used to test the VertexSubmittedRun class which uses that object
to poll on the status of the job.
"""
def __init__(self, statuses: List[str]):
self.statuses = statuses
self.status_index = 0
@property
def state(self):
status = self.statuses[self.status_index]
self.status_index += 1
return f"JobState.JOB_STATE_{status}"
@property
def display_name(self):
return "test-display-name"
@property
def location(self):
return "test-location"
@property
def project(self):
return "test-project"
@property
def name(self):
return "test-name"
@pytest.mark.asyncio
async def test_vertex_submitted_run():
"""Test that the submitted run works as expected."""
job = MockCustomJob(["PENDING", "RUNNING", "SUCCEEDED", "FAILED"])
run = VertexSubmittedRun(job)
link = run.get_page_link()
assert (
link
== "https://console.cloud.google.com/vertex-ai/locations/test-location/training/test-name?project=test-project"
)
assert (await run.get_status()).state == "starting"
assert (await run.get_status()).state == "running"
assert (await run.get_status()).state == "finished"
assert (await run.get_status()).state == "failed"
def launch_project_factory(resource_args: dict, api: Api):
"""Construct a dummy LaunchProject with the given resource args."""
return LaunchProject(
api=api,
docker_config={
"docker_image": "test-image",
},
resource_args=resource_args,
uri="",
job="",
launch_spec={},
target_entity="",
target_project="",
name="",
git_info={},
overrides={},
resource="vertex",
run_id="",
)
@pytest.fixture
def vertex_runner(test_settings):
"""Vertex runner initialized with no backend config."""
registry = MagicMock()
environment = MagicMock()
async def _mock_get_credentials(*args, **kwargs):
return MagicMock()
async def _mock_verify(*args, **kwargs):
return MagicMock()
environment.get_credentials = _mock_get_credentials
environment.verify = _mock_verify
api = Api(default_settings=test_settings(), load_settings=False)
runner = VertexRunner(api, {"SYNCHRONOUS": False}, environment, registry)
return runner
@pytest.fixture
def mock_aiplatform(mocker):
"""Patch the aiplatform module with a mock object and return that object."""
mock = MagicMock()
def _fake_get_module(*args, **kwargs):
return mock
mocker.patch(
"wandb.sdk.launch.runner.vertex_runner.get_module",
side_effect=_fake_get_module,
)
return mock
@pytest.mark.asyncio
async def test_vertex_missing_worker_spec(vertex_runner):
"""Test that a launch error is raised when we are missing a worker spec."""
resource_args = {"vertex": {"worker_pool_specs": []}}
launch_project = launch_project_factory(resource_args, vertex_runner._api)
with pytest.raises(LaunchError) as e:
await vertex_runner.run(launch_project, "test-image")
assert "requires at least one worker pool spec" in str(e.value)
@pytest.mark.asyncio
async def test_vertex_missing_staging_bucket(vertex_runner):
"""Test that a launch error is raised when we are missing a staging bucket."""
resource_args = {
"vertex": {
"spec": {
"worker_pool_specs": [
{
"machine_spec": {"machine_type": "n1-standard-4"},
"replica_count": 1,
"container_spec": {"image_uri": "test-image"},
}
]
}
}
}
launch_project = launch_project_factory(resource_args, vertex_runner._api)
with pytest.raises(LaunchError) as e:
await vertex_runner.run(launch_project, "test-image")
assert "requires a staging bucket" in str(e.value)
@pytest.mark.asyncio
async def test_vertex_missing_image(vertex_runner):
"""Test that a launch error is raised when we are missing an image."""
resource_args = {
"vertex": {
"spec": {
"worker_pool_specs": [
{
"machine_spec": {"machine_type": "n1-standard-4"},
"replica_count": 1,
},
{
"machine_spec": {"machine_type": "n1-standard-4"},
"replica_count": 1,
"container_spec": {"image_uri": "test-image"},
},
],
"stage_bucket": "test-bucket",
}
}
}
launch_project = launch_project_factory(resource_args, vertex_runner._api)
with pytest.raises(LaunchError) as e:
await vertex_runner.run(launch_project, "test-image")
assert "requires a container spec" in str(e.value)
@pytest.mark.asyncio
async def test_vertex_runner_works(vertex_runner, mock_aiplatform):
"""Test that the vertex runner works as expected with good inputs."""
resource_args = {
"vertex": {
"spec": {
"worker_pool_specs": [
{
"machine_spec": {"machine_type": "n1-standard-4"},
"replica_count": 2,
"container_spec": {"image_uri": "test-image"},
},
{
"machine_spec": {"machine_type": "n1-standard-8"},
"replica_count": 1,
"container_spec": {"image_uri": "${image_uri}"},
},
],
"staging_bucket": "test-bucket",
}
}
}
launch_project = launch_project_factory(resource_args, vertex_runner._api)
submitted_run = await vertex_runner.run(launch_project, "test-image")
mock_aiplatform.init()
mock_aiplatform.CustomJob.assert_called_once()
submitted_spec = mock_aiplatform.CustomJob.call_args[1]["worker_pool_specs"]
assert len(submitted_spec) == 2
assert submitted_spec[0]["machine_spec"]["machine_type"] == "n1-standard-4"
assert submitted_spec[0]["replica_count"] == 2
assert submitted_spec[0]["container_spec"]["image_uri"] == "test-image"
assert submitted_spec[1]["machine_spec"]["machine_type"] == "n1-standard-8"
assert submitted_spec[1]["replica_count"] == 1
# This assertion tests macro substitution of the image uri.
assert submitted_spec[1]["container_spec"]["image_uri"] == "test-image"
submitted_run._job = MockCustomJob(["PENDING", "RUNNING", "SUCCEEDED"])
assert (await submitted_run.get_status()).state == "starting"
assert (await submitted_run.get_status()).state == "running"
assert (await submitted_run.get_status()).state == "finished"
| MockCustomJob |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/router/base.py | {
"start": 1657,
"end": 4777
} | class ____(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If `True`, use default_chain when an invalid destination name is provided."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects."""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key."""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs),
verbose=self.verbose,
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
if route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs,
callbacks=callbacks,
)
if self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs),
verbose=self.verbose,
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs,
callbacks=callbacks,
)
if route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs,
callbacks=callbacks,
)
if self.silent_errors:
return await self.default_chain.acall(
route.next_inputs,
callbacks=callbacks,
)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
| MultiRouteChain |
python | ray-project__ray | python/ray/train/v2/_internal/metrics/controller.py | {
"start": 247,
"end": 2360
} | class ____:
"""Factory for creating controller-specific metrics.
This class defines all metrics used to track the state and performance of the
training controller. Each metric is defined with its name, type, default value,
description, and required tags.
"""
# ===== Metric Names =====
CONTROLLER_STATE = "train_controller_state"
WORKER_GROUP_START_TOTAL_TIME_S = "train_worker_group_start_total_time_s"
WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S = "train_worker_group_shutdown_total_time_s"
# ===== Tag Keys =====
CONTROLLER_STATE_TAG_KEY = "ray_train_controller_state"
@classmethod
def _create_time_metric(
cls, name: str, description: str, base_tags: Dict[str, str]
) -> TimeMetric:
return TimeMetric(
name=name,
description=description,
base_tags=base_tags,
)
@classmethod
def _create_controller_state_metric(
cls, base_tags: Dict[str, str]
) -> EnumMetric[TrainControllerStateType]:
return EnumMetric[TrainControllerStateType](
name=cls.CONTROLLER_STATE,
description="Current state of the Ray Train controller",
base_tags=base_tags,
enum_tag_key=cls.CONTROLLER_STATE_TAG_KEY,
)
@classmethod
def get_controller_metrics(
cls, run_name: str, run_id: str
) -> Dict[str, Union[TimeMetric, EnumMetric[TrainControllerStateType]]]:
base_tags = {RUN_NAME_TAG_KEY: run_name, RUN_ID_TAG_KEY: run_id}
return {
cls.WORKER_GROUP_START_TOTAL_TIME_S: cls._create_time_metric(
cls.WORKER_GROUP_START_TOTAL_TIME_S,
"Total time taken to start the worker group",
base_tags,
),
cls.WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S: cls._create_time_metric(
cls.WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S,
"Total time taken to shutdown the worker group",
base_tags,
),
cls.CONTROLLER_STATE: cls._create_controller_state_metric(base_tags),
}
| ControllerMetrics |
python | python-markdown__markdown | markdown/blockprocessors.py | {
"start": 25505,
"end": 27013
} | class ____(BlockProcessor):
""" Process Paragraph blocks. """
def test(self, parent: etree.Element, block: str) -> bool:
return True
def run(self, parent: etree.Element, blocks: list[str]) -> None:
block = blocks.pop(0)
if block.strip():
# Not a blank block. Add to parent, otherwise throw it away.
if self.parser.state.isstate('list'):
# The parent is a tight-list.
#
# Check for any children. This will likely only happen in a
# tight-list when a header isn't followed by a blank line.
# For example:
#
# * # Header
# Line 2 of list item - not part of header.
sibling = self.lastChild(parent)
if sibling is not None:
# Insert after sibling.
if sibling.tail:
sibling.tail = '{}\n{}'.format(sibling.tail, block)
else:
sibling.tail = '\n%s' % block
else:
# Append to parent.text
if parent.text:
parent.text = '{}\n{}'.format(parent.text, block)
else:
parent.text = block.lstrip()
else:
# Create a regular paragraph
p = etree.SubElement(parent, 'p')
p.text = block.lstrip()
| ParagraphProcessor |
python | graphql-python__graphene | examples/context_example.py | {
"start": 18,
"end": 105
} | class ____(graphene.ObjectType):
id = graphene.ID()
name = graphene.String()
| User |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 11124,
"end": 11581
} | class ____(test_util.TensorFlowTestCase):
def testRounding(self):
x = np.arange(-5.0, 5.0, .25)
for dtype in [np.float32, np.double, np.int32]:
x_np = np.array(x, dtype=dtype)
with test_util.device(use_gpu=True):
x_tf = _get_weak_tensor(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = self.evaluate(y_tf)
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
| RoundTest |
python | huggingface__transformers | src/transformers/models/omdet_turbo/processing_omdet_turbo.py | {
"start": 2104,
"end": 7725
} | class ____(dict):
message = (
"The `classes` key is deprecated for `OmDetTurboProcessor.post_process_grounded_object_detection` "
"output dict and will be removed in a 4.51.0 version. Please use `text_labels` instead."
)
def __getitem__(self, key):
if key == "classes":
warnings.warn(self.message, FutureWarning)
return super().__getitem__("text_labels")
return super().__getitem__(key)
def get(self, key, *args, **kwargs):
if key == "classes":
warnings.warn(self.message, FutureWarning)
return super().get("text_labels", *args, **kwargs)
return super().get(key, *args, **kwargs)
def clip_boxes(box, box_size: tuple[int, int]):
"""
Clip the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box (Tensor): The box to be clipped.
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(box).all(), "Box tensor contains infinite or NaN!"
height, width = box_size
x1 = box[:, 0].clamp(min=0, max=width)
y1 = box[:, 1].clamp(min=0, max=height)
x2 = box[:, 2].clamp(min=0, max=width)
y2 = box[:, 3].clamp(min=0, max=height)
box = torch.stack((x1, y1, x2, y2), dim=-1)
return box
def compute_score(boxes):
"""
Compute logit scores per class for each box (proposal) and an array of class indices
corresponding to each proposal, flattened across the proposal_num.
The indices in `classes` will later be used to filter and match the predicted classes
with the input class names.
"""
num_classes = boxes.shape[2]
proposal_num = boxes.shape[1]
scores = torch.sigmoid(boxes)
classes = torch.arange(num_classes, device=boxes.device).unsqueeze(0).repeat(proposal_num, 1).flatten(0, 1)
return scores, classes
def _post_process_boxes_for_image(
boxes: "torch.Tensor",
scores: "torch.Tensor",
labels: "torch.Tensor",
image_num_classes: int,
image_size: tuple[int, int],
threshold: float,
nms_threshold: float,
max_num_det: Optional[int] = None,
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
"""
Filter predicted results using given thresholds and NMS.
Args:
boxes (`torch.Tensor`):
A Tensor of predicted class-specific or class-agnostic boxes for the image.
Shape (num_queries, max_num_classes_in_batch * 4) if doing class-specific regression,
or (num_queries, 4) if doing class-agnostic regression.
scores (`torch.Tensor` of shape (num_queries, max_num_classes_in_batch + 1)):
A Tensor of predicted class scores for the image.
labels (`torch.Tensor` of shape (num_queries * (max_num_classes_in_batch + 1),)):
A Tensor of predicted labels for the image.
image_num_classes (`int`):
The number of classes queried for detection on the image.
image_size (`tuple[int, int]`):
A tuple of (height, width) for the image.
threshold (`float`):
Only return detections with a confidence score exceeding this threshold.
nms_threshold (`float`):
The threshold to use for box non-maximum suppression. Value in [0, 1].
max_num_det (`int`, *optional*):
The maximum number of detections to return. Default is None.
Returns:
Tuple: A tuple with the following:
"boxes" (Tensor): A tensor of shape (num_filtered_objects, 4), containing the predicted boxes in (x1, y1, x2, y2) format.
"scores" (Tensor): A tensor of shape (num_filtered_objects,), containing the predicted confidence scores for each detection.
"labels" (Tensor): A tensor of ids, where each id is the predicted class id for the corresponding detection
"""
# Filter by max number of detections
proposal_num = len(boxes) if max_num_det is None else max_num_det
scores_per_image, topk_indices = scores.flatten(0, 1).topk(proposal_num, sorted=False)
labels_per_image = labels[topk_indices]
boxes_per_image = boxes.view(-1, 1, 4).repeat(1, scores.shape[1], 1).view(-1, 4)
boxes_per_image = boxes_per_image[topk_indices]
# Convert and scale boxes to original image size
boxes_per_image = center_to_corners_format(boxes_per_image)
boxes_per_image = boxes_per_image * torch.tensor(image_size[::-1]).repeat(2).to(boxes_per_image.device)
# Filtering by confidence score
filter_mask = scores_per_image > threshold # R x K
score_keep = filter_mask.nonzero(as_tuple=False).view(-1)
boxes_per_image = boxes_per_image[score_keep]
scores_per_image = scores_per_image[score_keep]
labels_per_image = labels_per_image[score_keep]
# Ensure we did not overflow to non existing classes
filter_classes_mask = labels_per_image < image_num_classes
classes_keep = filter_classes_mask.nonzero(as_tuple=False).view(-1)
boxes_per_image = boxes_per_image[classes_keep]
scores_per_image = scores_per_image[classes_keep]
labels_per_image = labels_per_image[classes_keep]
# NMS
keep = batched_nms(boxes_per_image, scores_per_image, labels_per_image, nms_threshold)
boxes_per_image = boxes_per_image[keep]
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
# Clip to image size
boxes_per_image = clip_boxes(boxes_per_image, image_size)
return boxes_per_image, scores_per_image, labels_per_image
@requires(backends=("vision", "torchvision"))
| DictWithDeprecationWarning |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/private.py | {
"start": 6445,
"end": 6618
} | class ____(PrivateViewMixin, OrganizationTeamView, UpdateView):
template_name = "organizations/team_edit.html"
success_message = _("Team updated")
| EditOrganizationTeam |
python | pypa__warehouse | tests/unit/oidc/models/test_gitlab.py | {
"start": 35837,
"end": 38170
} | class ____:
def test_reify_does_not_exist_yet(self, db_request):
pending_publisher = PendingGitLabPublisherFactory.create()
assert (
db_request.db.query(gitlab.GitLabPublisher)
.filter_by(
project=pending_publisher.project,
namespace=pending_publisher.namespace,
workflow_filepath=pending_publisher.workflow_filepath,
environment=pending_publisher.environment,
)
.one_or_none()
is None
)
publisher = pending_publisher.reify(db_request.db)
# If an OIDC publisher for this pending publisher does not already exist,
# a new one is created and the pending publisher is marked for deletion.
assert isinstance(publisher, gitlab.GitLabPublisher)
assert pending_publisher in db_request.db.deleted
assert publisher.project == pending_publisher.project
assert publisher.namespace == pending_publisher.namespace
assert publisher.workflow_filepath == pending_publisher.workflow_filepath
assert publisher.environment == pending_publisher.environment
def test_reify_already_exists(self, db_request):
existing_publisher = GitLabPublisherFactory.create()
pending_publisher = PendingGitLabPublisherFactory.create(
project=existing_publisher.project,
namespace=existing_publisher.namespace,
workflow_filepath=existing_publisher.workflow_filepath,
environment=existing_publisher.environment,
)
publisher = pending_publisher.reify(db_request.db)
# If an OIDC publisher for this pending publisher already exists,
# it is returned and the pending publisher is marked for deletion.
assert existing_publisher == publisher
assert pending_publisher in db_request.db.deleted
def test_reify_with_custom_issuer_url(self, db_request):
custom_issuer_url = "https://gitlab.custom-domain.com"
pending_publisher = PendingGitLabPublisherFactory.create(
issuer_url=custom_issuer_url
)
publisher = pending_publisher.reify(db_request.db)
assert publisher.issuer_url == custom_issuer_url
assert isinstance(publisher, gitlab.GitLabPublisher)
| TestPendingGitLabPublisher |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 35041,
"end": 37920
} | class ____(TvpPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.model = TvpModel(config)
self.video_grounding_head = TvpVideoGroundingHead(config)
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
labels: Optional[tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
):
r"""
labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*):
The labels contains duration, start time, and end time of the video corresponding to the text.
Examples:
```python
>>> import torch
>>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding
>>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
>>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
>>> pixel_values = torch.rand(1, 1, 3, 448, 448)
>>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
>>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
outputs = self.model(
input_ids,
pixel_values,
attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
interpolate_pos_encoding=interpolate_pos_encoding,
)
pooler_output = outputs[1]
logits = self.video_grounding_head(pooler_output)
loss = None
if labels is not None:
criterion = TvpLoss(["iou", "distance", "duration"])
criterion.to(self.device)
loss_dict = criterion(logits, labels)
loss = (
loss_dict["iou"]
+ self.config.distance_loss_weight * loss_dict["distance"]
+ self.config.duration_loss_weight * loss_dict["duration"]
)
if not return_dict:
outputs = (logits,) + outputs[2:]
if loss is not None:
outputs = (loss,) + outputs
return outputs
return TvpVideoGroundingOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["TvpModel", "TvpPreTrainedModel", "TvpForVideoGrounding"]
| TvpForVideoGrounding |
python | etianen__django-reversion | reversion/admin.py | {
"start": 1108,
"end": 1218
} | class ____(Exception):
def __init__(self, response):
self.response = response
| _RollBackRevisionView |
python | kubernetes-client__python | kubernetes/client/models/v2_external_metric_status.py | {
"start": 383,
"end": 4471
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'current': 'V2MetricValueStatus',
'metric': 'V2MetricIdentifier'
}
attribute_map = {
'current': 'current',
'metric': 'metric'
}
def __init__(self, current=None, metric=None, local_vars_configuration=None): # noqa: E501
"""V2ExternalMetricStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._current = None
self._metric = None
self.discriminator = None
self.current = current
self.metric = metric
@property
def current(self):
"""Gets the current of this V2ExternalMetricStatus. # noqa: E501
:return: The current of this V2ExternalMetricStatus. # noqa: E501
:rtype: V2MetricValueStatus
"""
return self._current
@current.setter
def current(self, current):
"""Sets the current of this V2ExternalMetricStatus.
:param current: The current of this V2ExternalMetricStatus. # noqa: E501
:type: V2MetricValueStatus
"""
if self.local_vars_configuration.client_side_validation and current is None: # noqa: E501
raise ValueError("Invalid value for `current`, must not be `None`") # noqa: E501
self._current = current
@property
def metric(self):
"""Gets the metric of this V2ExternalMetricStatus. # noqa: E501
:return: The metric of this V2ExternalMetricStatus. # noqa: E501
:rtype: V2MetricIdentifier
"""
return self._metric
@metric.setter
def metric(self, metric):
"""Sets the metric of this V2ExternalMetricStatus.
:param metric: The metric of this V2ExternalMetricStatus. # noqa: E501
:type: V2MetricIdentifier
"""
if self.local_vars_configuration.client_side_validation and metric is None: # noqa: E501
raise ValueError("Invalid value for `metric`, must not be `None`") # noqa: E501
self._metric = metric
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2ExternalMetricStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2ExternalMetricStatus):
return True
return self.to_dict() != other.to_dict()
| V2ExternalMetricStatus |
python | ray-project__ray | release/release_logs/fetch_release_logs.py | {
"start": 1990,
"end": 2067
} | class ____:
build: Build
id: str
name: Optional[str]
@dataclass
| Job |
python | numpy__numpy | benchmarks/benchmarks/bench_reduce.py | {
"start": 1063,
"end": 1725
} | class ____(Benchmark):
params = ['int64', 'uint64', 'float32', 'float64', 'complex64', 'bool_'],
param_names = ['dtype']
def setup(self, dtype):
self.data = np.ones(200, dtype=dtype)
if dtype.startswith('complex'):
self.data = self.data * self.data.T * 1j
def time_min(self, dtype):
np.min(self.data)
def time_max(self, dtype):
np.max(self.data)
def time_mean(self, dtype):
np.mean(self.data)
def time_std(self, dtype):
np.std(self.data)
def time_prod(self, dtype):
np.prod(self.data)
def time_var(self, dtype):
np.var(self.data)
| StatsReductions |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_8109.py | {
"start": 191,
"end": 393
} | class ____:
amount: int | float
round: int = 2
def __str__(self):
number_format = "{:,.%sf}" % self.round
return number_format.format(self.amount).rstrip("0").rstrip(".")
| Number |
python | getsentry__sentry | src/sentry/migrations/1009_add_date_updated_to_organizationmapping.py | {
"start": 249,
"end": 2082
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "1008_loosen_unique_title_contraint"),
]
operations = [
# This migration had to be reverted after it was merged and ran in some environments.
# Clean up the previous attempt and try again
SafeRunSQL(
sql="""
ALTER TABLE "sentry_organizationmapping" DROP COLUMN IF EXISTS "date_updated";
""",
reverse_sql="",
hints={"tables": ["sentry_organizationmapping"]},
),
migrations.AddField(
model_name="organizationmapping",
name="date_updated",
field=models.DateTimeField(
db_default=django.db.models.functions.Now(),
auto_now=True,
db_index=True,
),
),
]
| Migration |
python | kamyu104__LeetCode-Solutions | Python/max-sum-of-sub-matrix-no-larger-than-k.py | {
"start": 1234,
"end": 3314
} | class ____(object):
def maxSumSubmatrix(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
class BST(object): # not avl, rbtree
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def insert(self, val): # Time: O(h) = O(logn) ~ O(n)
curr = self
while curr:
if curr.val >= val:
if curr.left:
curr = curr.left
else:
curr.left = BST(val)
return
else:
if curr.right:
curr = curr.right
else:
curr.right = BST(val)
return
def lower_bound(self, val): # Time: O(h) = O(logn) ~ O(n)
result, curr = None, self
while curr:
if curr.val >= val:
result, curr = curr, curr.left
else:
curr = curr.right
return result
if not matrix:
return 0
m = min(len(matrix), len(matrix[0]))
n = max(len(matrix), len(matrix[0]))
result = float("-inf")
for i in xrange(m):
sums = [0] * n
for j in xrange(i, m):
for l in xrange(n):
sums[l] += matrix[j][l] if m == len(matrix) else matrix[l][j]
# Find the max subarray no more than K.
accu_sum_set = BST(0)
accu_sum = 0
for sum in sums:
accu_sum += sum
node = accu_sum_set.lower_bound(accu_sum - k)
if node:
result = max(result, accu_sum - node.val)
accu_sum_set.insert(accu_sum)
return result
| Solution_TLE |
python | kamyu104__LeetCode-Solutions | Python/longest-common-prefix.py | {
"start": 527,
"end": 897
} | class ____(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
prefix = ""
for chars in zip(*strs):
if all(c == chars[0] for c in chars):
prefix += chars[0]
else:
return prefix
return prefix
| Solution2 |
python | great-expectations__great_expectations | great_expectations/profile/base.py | {
"start": 2466,
"end": 2707
} | class ____(Enum):
"""Useful data types for building profilers."""
INT = "int"
FLOAT = "float"
NUMERIC = "numeric"
STRING = "string"
BOOLEAN = "boolean"
DATETIME = "datetime"
UNKNOWN = "unknown"
| ProfilerDataType |
python | encode__django-rest-framework | rest_framework/test.py | {
"start": 9450,
"end": 10012
} | class ____(ClientHandler):
"""
A patched version of ClientHandler that can enforce authentication
on the outgoing requests.
"""
def __init__(self, *args, **kwargs):
self._force_user = None
self._force_token = None
super().__init__(*args, **kwargs)
def get_response(self, request):
# This is the simplest place we can hook into to patch the
# request object.
force_authenticate(request, self._force_user, self._force_token)
return super().get_response(request)
| ForceAuthClientHandler |
python | streamlit__streamlit | lib/tests/streamlit/web/cli_test.py | {
"start": 1396,
"end": 26394
} | class ____(unittest.TestCase):
"""Unit tests for the cli."""
def setUp(self):
# Credentials._singleton should be None here, but a mis-behaving
# test may have left it intact.
Credentials._singleton = None
cli.name = "streamlit"
self.runner = CliRunner()
self.patches = [
patch.object(config._on_config_parsed, "send"),
# Make sure the calls to `streamlit run` in this file don't unset
# the config options loaded in conftest.py.
patch.object(streamlit.web.bootstrap, "load_config_options"),
]
for p in self.patches:
p.start()
def tearDown(self):
Credentials._singleton = None
for p in self.patches:
p.stop()
def test_run_no_file_argument_but_default_exists(self):
"""streamlit run should succeed when run with no arguments and the default file exists."""
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run") as mock_main_run,
patch("pathlib.Path.exists", return_value=True),
):
result = self.runner.invoke(cli, ["run"])
assert result.exit_code == 0
mock_main_run.assert_called_once()
positional_args = mock_main_run.call_args[0]
assert positional_args[0] == "streamlit_app.py"
def test_run_no_file_argument_and_default_doesnt_exist(self):
"""streamlit run should fail if run with no arguments and default file doesn't exist."""
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run"),
patch("pathlib.Path.exists", return_value=False),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
assert result.exit_code != 0
def test_run_existing_file_argument(self):
"""streamlit run succeeds if an existing file is passed."""
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run") as mock_main_run,
patch("pathlib.Path.exists", return_value=True),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
assert result.exit_code == 0
mock_main_run.assert_called_once()
positional_args = mock_main_run.call_args[0]
assert positional_args[0] == "file_name.py"
def test_run_existing_path_argument(self):
"""streamlit run succeeds if an existing path is passed."""
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run") as mock_main_run,
patch("pathlib.Path.exists", return_value=True),
patch("pathlib.Path.is_dir", return_value=True),
):
result = self.runner.invoke(cli, ["run", "foo/bar"])
assert result.exit_code == 0
mock_main_run.assert_called_once()
positional_args = mock_main_run.call_args[0]
assert positional_args[0] == "foo/bar/streamlit_app.py"
def test_run_non_existing_file_argument(self):
"""streamlit run should fail if a non existing file is passed."""
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run"),
patch("pathlib.Path.exists", return_value=False),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
assert result.exit_code != 0
assert "File does not exist" in result.output
def test_run_not_allowed_file_extension(self):
"""streamlit run should fail if a not allowed file extension is passed."""
result = self.runner.invoke(cli, ["run", "file_name.doc"])
assert result.exit_code != 0
assert "Streamlit requires raw Python (.py) files, not .doc." in result.output
@tempdir()
def test_run_valid_url(self, temp_dir):
"""streamlit run succeeds if an existing url is passed."""
with (
patch("streamlit.url_util.is_url", return_value=True),
patch("streamlit.web.cli._main_run"),
requests_mock.mock() as m,
):
file_content = b"content"
m.get("http://url/app.py", content=file_content)
with patch("streamlit.temporary_directory.TemporaryDirectory") as mock_tmp:
mock_tmp.return_value.__enter__.return_value = temp_dir.path
result = self.runner.invoke(cli, ["run", "http://url/app.py"])
with open(os.path.join(temp_dir.path, "app.py"), "rb") as f:
assert file_content == f.read()
assert result.exit_code == 0
@tempdir()
def test_run_non_existing_url(self, temp_dir):
"""streamlit run should fail if a non existing but valid
url is passed.
"""
with (
patch("streamlit.url_util.is_url", return_value=True),
patch("streamlit.web.cli._main_run"),
requests_mock.mock() as m,
):
m.get("http://url/app.py", exc=requests.exceptions.RequestException)
with patch("streamlit.temporary_directory.TemporaryDirectory") as mock_tmp:
mock_tmp.return_value.__enter__.return_value = temp_dir.path
result = self.runner.invoke(cli, ["run", "http://url/app.py"])
assert result.exit_code != 0
assert "Unable to fetch" in result.output
def test_run_arguments(self):
"""The correct command line should be passed downstream."""
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.web.cli._main_run") as mock_main_run,
):
result = self.runner.invoke(
cli,
[
"run",
"some script.py",
"argument with space",
"argument with another space",
],
)
mock_main_run.assert_called_once()
positional_args = mock_main_run.call_args[0]
assert positional_args[0] == "some script.py"
assert positional_args[1] == (
"argument with space",
"argument with another space",
)
assert result.exit_code == 0
def test_run_command_with_flag_config_options(self):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.bootstrap.run"),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.web.cli.check_credentials"),
):
result = self.runner.invoke(
cli, ["run", "file_name.py", "--server.port=8502"]
)
streamlit.web.bootstrap.load_config_options.assert_called_once()
_args, kwargs = streamlit.web.bootstrap.load_config_options.call_args
assert kwargs["flag_options"]["server_port"] == 8502
assert result.exit_code == 0
def test_run_command_with_multiple_secrets_path_single_value(self):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.bootstrap.run"),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.web.cli.check_credentials"),
):
result = self.runner.invoke(
cli, ["run", "file_name.py", "--secrets.files=secrets1.toml"]
)
streamlit.web.bootstrap.load_config_options.assert_called_once()
_args, kwargs = streamlit.web.bootstrap.load_config_options.call_args
assert kwargs["flag_options"]["secrets_files"] == ("secrets1.toml",)
assert result.exit_code == 0
def test_run_command_with_multiple_secrets_path_multiple_value(self):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.bootstrap.run"),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.web.cli.check_credentials"),
):
result = self.runner.invoke(
cli,
[
"run",
"file_name.py",
"--secrets.files=secrets1.toml",
"--secrets.files=secrets2.toml",
],
)
streamlit.web.bootstrap.load_config_options.assert_called_once()
_args, kwargs = streamlit.web.bootstrap.load_config_options.call_args
assert kwargs["flag_options"]["secrets_files"] == (
"secrets1.toml",
"secrets2.toml",
)
assert result.exit_code == 0
@parameterized.expand(["mapbox.token", "server.cookieSecret"])
def test_run_command_with_sensitive_options_as_flag(self, sensitive_option):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run"),
patch("pathlib.Path.exists", return_value=True),
):
result = self.runner.invoke(
cli, ["run", "file_name.py", f"--{sensitive_option}=TESTSECRET"]
)
assert "option using the CLI flag is not allowed" in result.output
assert result.exit_code == 1
def test_get_command_line(self):
"""Test that _get_command_line_as_string correctly concatenates values
from click.
"""
mock_context = MagicMock()
mock_context.parent.command_path = "streamlit"
with patch("click.get_current_context", return_value=mock_context):
with patch.object(sys, "argv", ["", "os_arg1", "os_arg2"]):
result = cli._get_command_line_as_string()
assert result == "streamlit os_arg1 os_arg2"
def test_get_command_line_without_parent_context(self):
"""Test that _get_command_line_as_string correctly returns None when
there is no context parent
"""
mock_context = MagicMock()
mock_context.parent = None
with patch("click.get_current_context", return_value=mock_context):
result = cli._get_command_line_as_string()
assert result is None
def test_convert_config_option_to_click_option(self):
"""Test that configurator_options adds dynamic commands based on a
config lists.
"""
config_option = ConfigOption(
"server.customKey",
description="Custom description.\n\nLine one.",
deprecated=False,
type_=int,
)
result = _convert_config_option_to_click_option(config_option)
assert result["option"] == "--server.customKey"
assert result["param"] == "server_customKey"
assert result["type"] == config_option.type
assert result["description"] == config_option.description
assert result["envvar"] == "STREAMLIT_SERVER_CUSTOM_KEY"
def test_convert_depecated_config_option_to_click_option(self):
"""Test that configurator_options adds extra deprecation information
to config option's description
"""
config_option = ConfigOption(
"deprecated.customKey",
description="Custom description.\n\nLine one.",
deprecated=True,
deprecation_text="Foo",
expiration_date="Bar",
type_=int,
)
result = _convert_config_option_to_click_option(config_option)
assert result["description"] == "Custom description.\n\nLine one.\n Foo - Bar"
def test_credentials_headless_no_config(self):
"""If headless mode and no config is present,
activation should be None."""
with testutil.patch_config_options({"server.headless": True}):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.bootstrap.run"),
patch("pathlib.Path.exists", return_value=True),
patch(
"streamlit.runtime.credentials._check_credential_file_exists",
return_value=False,
),
):
result = self.runner.invoke(cli, ["run", "some script.py"])
from streamlit.runtime.credentials import Credentials
credentials = Credentials.get_current()
assert credentials.activation is None
assert result.exit_code == 0
@parameterized.expand([(True,), (False,)])
def test_credentials_headless_with_config(self, headless_mode):
"""If headless, but a config file is present, activation should be
defined.
So we call `_check_activated`.
"""
with testutil.patch_config_options({"server.headless": headless_mode}):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.bootstrap.run"),
patch("pathlib.Path.exists", return_value=True),
mock.patch(
"streamlit.runtime.credentials.Credentials._check_activated"
) as mock_check,
patch(
"streamlit.runtime.credentials._check_credential_file_exists",
return_value=True,
),
):
result = self.runner.invoke(cli, ["run", "some script.py"])
assert mock_check.called
assert result.exit_code == 0
@parameterized.expand([(True,), (False,)])
def test_headless_telemetry_message(self, headless_mode):
"""If headless mode, show a message about usage metrics gathering."""
with testutil.patch_config_options({"server.headless": headless_mode}):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.config.is_manually_set", return_value=False),
patch(
"streamlit.runtime.credentials._check_credential_file_exists",
return_value=False,
),
patch("streamlit.web.bootstrap.run"),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
assert ("Collecting usage statistics" in result.output) == headless_mode, (
f"Telemetry message mode is {headless_mode} "
f"yet output is: {result.output}"
)
@parameterized.expand([(False, False), (False, True), (True, False), (True, True)])
def test_prompt_welcome_message(self, prompt_mode, headless_mode):
"""If prompt is true, show a welcome prompt, unless headless."""
with testutil.patch_config_options(
{"server.showEmailPrompt": prompt_mode, "server.headless": headless_mode}
):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.config.is_manually_set", return_value=False),
patch(
"streamlit.runtime.credentials._check_credential_file_exists",
return_value=False,
),
patch("streamlit.web.bootstrap.run"),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
assert (prompt_mode and not headless_mode) == (
"like to receive helpful onboarding emails, news, offers, promotions,"
in result.output
), (
f"Welcome message mode is {prompt_mode} "
f"and headless mode is {headless_mode} "
f"yet output is: {result.output}"
)
def test_streamlit_folder_not_created_when_show_email_prompt_false(self):
"""Test that ~/.streamlit directory is not created when server.showEmailPrompt=False."""
with testutil.patch_config_options(
{"server.showEmailPrompt": False, "server.headless": False}
):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.config.is_manually_set", return_value=False),
patch(
"streamlit.runtime.credentials._check_credential_file_exists",
return_value=False,
),
patch("streamlit.runtime.credentials.os.makedirs") as mock_makedirs,
patch("streamlit.web.bootstrap.run"),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
# Assert that makedirs was never called to create ~/.streamlit directory
mock_makedirs.assert_not_called()
assert result.exit_code == 0
def test_streamlit_folder_created_when_show_email_prompt_true(self):
"""Test that ~/.streamlit directory is created when server.showEmailPrompt=True."""
with testutil.patch_config_options(
{"server.showEmailPrompt": True, "server.headless": False}
):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.config.is_manually_set", return_value=False),
patch(
"streamlit.runtime.credentials._check_credential_file_exists",
return_value=False,
),
patch("streamlit.runtime.credentials.os.makedirs") as mock_makedirs,
patch("streamlit.web.bootstrap.run"),
patch("click.prompt", return_value="test@example.com") as mock_prompt,
patch(
"streamlit.runtime.credentials.open", mock.mock_open(), create=True
),
patch("streamlit.runtime.credentials._send_email"),
):
result = self.runner.invoke(cli, ["run", "file_name.py"])
# Assert that makedirs was called to create ~/.streamlit directory
mock_makedirs.assert_called_once()
# Assert that the email prompt was shown
mock_prompt.assert_called_once()
assert result.exit_code == 0
def test_help_command(self):
"""Tests the help command redirects to using the --help flag"""
with patch.object(sys, "argv", ["streamlit", "help"]) as args:
self.runner.invoke(cli, ["help"])
assert args[1] == "--help"
def test_version_command(self):
"""Tests the version command redirects to using the --version flag"""
with patch.object(sys, "argv", ["streamlit", "version"]) as args:
self.runner.invoke(cli, ["version"])
assert args[1] == "--version"
def test_docs_command(self):
"""Tests the docs command opens the browser"""
with patch("streamlit.cli_util.open_browser") as mock_open_browser:
self.runner.invoke(cli, ["docs"])
mock_open_browser.assert_called_once_with("https://docs.streamlit.io")
def test_hello_command(self):
"""Tests the hello command runs the hello script in streamlit"""
from streamlit.hello import streamlit_app
with patch("streamlit.web.cli._main_run") as mock_main_run:
self.runner.invoke(cli, ["hello"])
mock_main_run.assert_called_once()
positional_args = mock_main_run.call_args[0]
assert positional_args[0] == streamlit_app.__file__
@patch("streamlit.logger.get_logger")
def test_hello_command_with_logs(self, get_logger):
"""Tests setting log level using --log_level prints a warning."""
with patch("streamlit.web.cli._main_run"):
self.runner.invoke(cli, ["--log_level", "error", "hello"])
mock_logger = get_logger()
mock_logger.warning.assert_called_once()
def test_hello_command_with_flag_config_options(self):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.bootstrap.run"),
patch("pathlib.Path.exists", return_value=True),
patch("streamlit.web.cli.check_credentials"),
):
result = self.runner.invoke(cli, ["hello", "--server.port=8502"])
streamlit.web.bootstrap.load_config_options.assert_called_once()
_args, kwargs = streamlit.web.bootstrap.load_config_options.call_args
assert kwargs["flag_options"]["server_port"] == 8502
assert result.exit_code == 0
def test_config_show_command(self):
"""Tests the config show command calls the corresponding method in
config
"""
with patch("streamlit.config.show_config") as mock_config:
self.runner.invoke(cli, ["config", "show"])
mock_config.assert_called()
def test_config_show_command_with_flag_config_options(self):
with (
patch("streamlit.url_util.is_url", return_value=False),
patch("streamlit.web.cli._main_run"),
patch("pathlib.Path.exists", return_value=True),
):
result = self.runner.invoke(cli, ["config", "show", "--server.port=8502"])
streamlit.web.bootstrap.load_config_options.assert_called_once()
_args, kwargs = streamlit.web.bootstrap.load_config_options.call_args
assert kwargs["flag_options"]["server_port"] == 8502
assert result.exit_code == 0
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.LocalDiskCacheStorageManager.clear_all"
)
@patch("streamlit.runtime.caching.cache_resource.clear")
def test_cache_clear_all_caches(self, clear_resource_caches, clear_data_caches):
"""cli.clear_cache should clear st.cache_data and st.cache_resource"""
self.runner.invoke(cli, ["cache", "clear"])
clear_resource_caches.assert_called_once()
clear_data_caches.assert_called_once()
def test_activate_command(self):
"""Tests activating a credential"""
mock_credential = MagicMock()
with mock.patch(
"streamlit.runtime.credentials.Credentials.get_current",
return_value=mock_credential,
):
self.runner.invoke(cli, ["activate"])
mock_credential.activate.assert_called()
def test_activate_without_command(self):
"""Tests that it doesn't activate the credential when not specified"""
mock_credential = MagicMock()
with mock.patch(
"streamlit.runtime.credentials.Credentials.get_current",
return_value=mock_credential,
):
self.runner.invoke(cli)
mock_credential.activate.assert_not_called()
def test_reset_command(self):
"""Tests resetting a credential"""
mock_credential = MagicMock()
with mock.patch(
"streamlit.runtime.credentials.Credentials.get_current",
return_value=mock_credential,
):
self.runner.invoke(cli, ["activate", "reset"])
mock_credential.reset.assert_called()
def test_init_command(self):
"""Test creating a new project in current directory."""
with tempfile.TemporaryDirectory() as tmpdir:
orig_dir = os.getcwd()
os.chdir(tmpdir)
try:
result = self.runner.invoke(cli, ["init"], input="n\n")
# Check command output
assert result.exit_code == 0
# Check created files
assert Path(tmpdir, "requirements.txt").exists()
assert Path(tmpdir, "streamlit_app.py").exists()
# Check file contents
assert "streamlit" in Path(tmpdir, "requirements.txt").read_text()
assert (
"import streamlit as st"
in Path(tmpdir, "streamlit_app.py").read_text()
)
finally:
os.chdir(orig_dir)
def test_init_command_with_directory(self):
"""Test creating a new project in specified directory."""
with tempfile.TemporaryDirectory() as tmpdir:
orig_dir = os.getcwd()
os.chdir(tmpdir)
try:
result = self.runner.invoke(cli, ["init", "new-project"], input="n\n")
# Check command output
assert result.exit_code == 0
# Check created files
project_dir = Path(tmpdir) / "new-project"
assert (project_dir / "requirements.txt").exists()
assert (project_dir / "streamlit_app.py").exists()
finally:
os.chdir(orig_dir)
| CliTest |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 7233,
"end": 7799
} | class ____(LTComponent):
def __init__(self, bbox):
LTComponent.__init__(self, bbox)
self._objs = []
return
def __iter__(self):
return iter(self._objs)
def __len__(self):
return len(self._objs)
def add(self, obj):
self._objs.append(obj)
return
def extend(self, objs):
for obj in objs:
self.add(obj)
return
def analyze(self, laparams):
for obj in self._objs:
obj.analyze(laparams)
return
## LTExpandableContainer
##
| LTContainer |
python | tornadoweb__tornado | tornado/test/options_test.py | {
"start": 278,
"end": 516
} | class ____:
def __init__(self, value):
if isinstance(value, str) and "@" in value:
self._value = value
else:
raise ValueError()
@property
def value(self):
return self._value
| Email |
python | apache__airflow | providers/databricks/tests/unit/databricks/hooks/test_databricks_azure_workload_identity_async.py | {
"start": 1696,
"end": 3179
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
extra=json.dumps(
{
DEFAULT_AZURE_CREDENTIAL_SETTING_KEY: True,
}
),
)
)
@pytest.mark.asyncio
@mock.patch(
"azure.identity.aio.DefaultAzureCredential.get_token", return_value=create_aad_token_for_resource()
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_one(self, requests_mock, get_token_mock: mock.MagicMock):
with mock.patch.dict(
os.environ,
{
"AZURE_CLIENT_ID": "fake-client-id",
"AZURE_TENANT_ID": "fake-tenant-id",
"AZURE_FEDERATED_TOKEN_FILE": "/badpath",
"KUBERNETES_SERVICE_HOST": "fakeip",
},
):
requests_mock.return_value.__aenter__.return_value.json.side_effect = mock.AsyncMock(
side_effect=[{"data": 1}]
)
async with DatabricksHook(retry_args=DEFAULT_RETRY_ARGS) as hook:
result = await hook.a_get_run_output(0)
assert result == {"data": 1}
| TestDatabricksHookAadTokenWorkloadIdentityAsync |
python | joke2k__faker | faker/providers/color/de_AT/__init__.py | {
"start": 44,
"end": 83
} | class ____(BaseProvider):
pass
| Provider |
python | conda__conda | conda/common/configuration.py | {
"start": 4239,
"end": 4653
} | class ____(ValidationError):
def __init__(self, parameter_name, parameter_value, source, custom_message):
super().__init__(
parameter_name,
parameter_value,
source,
msg=(
f"Parameter {parameter_name} = {parameter_value!r} declared in "
f"{source} is invalid.\n{custom_message}"
),
)
| CustomValidationError |
python | google__jax | jax/_src/source_info_util.py | {
"start": 6784,
"end": 7187
} | class ____(Exception): pass
_message = (
'The preceding stack trace is the source of the JAX operation that, once '
'transformed by JAX, triggered the following exception.\n'
'\n--------------------')
def has_user_context(e):
while e is not None:
if isinstance(e, JaxStackTraceBeforeTransformation):
return True
e = e.__cause__
return False
| JaxStackTraceBeforeTransformation |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 22982,
"end": 24793
} | class ____(TestCase):
def test_r_less_than_n(self):
iterable = 'abcde'
r = 4
for index, expected in enumerate(permutations(iterable, r)):
actual = mi.nth_permutation(iterable, r, index)
self.assertEqual(actual, expected)
def test_r_equal_to_n(self):
iterable = 'abcde'
for index, expected in enumerate(permutations(iterable)):
actual = mi.nth_permutation(iterable, None, index)
self.assertEqual(actual, expected)
def test_long(self):
iterable = tuple(range(180))
r = 4
index = 1000000
actual = mi.nth_permutation(iterable, r, index)
expected = mi.nth(permutations(iterable, r), index)
self.assertEqual(actual, expected)
def test_null(self):
actual = mi.nth_permutation([], 0, 0)
expected = tuple()
self.assertEqual(actual, expected)
def test_negative_index(self):
iterable = 'abcde'
r = 4
n = factorial(len(iterable)) // factorial(len(iterable) - r)
for index, expected in enumerate(permutations(iterable, r)):
actual = mi.nth_permutation(iterable, r, index - n)
self.assertEqual(actual, expected)
def test_invalid_index(self):
iterable = 'abcde'
r = 4
n = factorial(len(iterable)) // factorial(len(iterable) - r)
for index in [-1 - n, n + 1]:
with self.assertRaises(IndexError):
mi.nth_permutation(iterable, r, index)
def test_invalid_r(self):
iterable = 'abcde'
r = 4
n = factorial(len(iterable)) // factorial(len(iterable) - r)
for r in [-1, n + 1]:
with self.assertRaises(ValueError):
mi.nth_permutation(iterable, r, 0)
| NthPermutationTests |
python | jina-ai__jina | tests/unit/serve/executors/metas_executors.py | {
"start": 53,
"end": 171
} | class ____(Executor):
@requests
def process(self, docs: DocumentArray, **kwargs):
return docs
| TestExecutor |
python | pydata__xarray | asv_bench/benchmarks/rolling.py | {
"start": 4352,
"end": 5133
} | class ____(RollingMemory):
@parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
def peakmem_ndrolling_reduce(self, func, use_bottleneck):
with xr.set_options(use_bottleneck=use_bottleneck):
roll = self.ds.rolling(x=10, y=4)
getattr(roll, func)()
@parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
def peakmem_1drolling_reduce(self, func, use_bottleneck):
with xr.set_options(use_bottleneck=use_bottleneck):
roll = self.ds.rolling(t=100)
getattr(roll, func)()
@parameterized(["stride"], ([None, 5, 50]))
def peakmem_1drolling_construct(self, stride):
self.ds.rolling(t=100).construct("w", stride=stride)
| DatasetRollingMemory |
python | sympy__sympy | sympy/printing/cxx.py | {
"start": 5524,
"end": 6123
} | class ____(_CXXCodePrinterBase, C99CodePrinter):
standard = 'C++17'
reserved_words = set(reserved['C++17'])
_kf = dict(C99CodePrinter._kf, **_math_functions['C++17'])
def _print_beta(self, expr):
return self._print_math_func(expr)
def _print_Ei(self, expr):
return self._print_math_func(expr)
def _print_zeta(self, expr):
return self._print_math_func(expr)
# _attach_print_methods(CXX17CodePrinter, _math_functions)
cxx_code_printers = {
'c++98': CXX98CodePrinter,
'c++11': CXX11CodePrinter,
'c++17': CXX17CodePrinter
}
| CXX17CodePrinter |
python | plotly__plotly.py | plotly/graph_objs/funnelarea/_marker.py | {
"start": 233,
"end": 4749
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnelarea"
_path_str = "funnelarea.marker"
_valid_props = {"colors", "colorssrc", "line", "pattern"}
@property
def colors(self):
"""
Sets the color of each sector. If not specified, the default
trace color set is used to pick the sector colors.
The 'colors' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["colors"]
@colors.setter
def colors(self, val):
self["colors"] = val
@property
def colorssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `colors`.
The 'colorssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorssrc"]
@colorssrc.setter
def colorssrc(self, val):
self["colorssrc"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.funnelarea.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def pattern(self):
"""
Sets the pattern within the marker.
The 'pattern' property is an instance of Pattern
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.marker.Pattern`
- A dict of string/value properties that will be passed
to the Pattern constructor
Returns
-------
plotly.graph_objs.funnelarea.marker.Pattern
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def _prop_descriptions(self):
return """\
colors
Sets the color of each sector. If not specified, the
default trace color set is used to pick the sector
colors.
colorssrc
Sets the source reference on Chart Studio Cloud for
`colors`.
line
:class:`plotly.graph_objects.funnelarea.marker.Line`
instance or dict with compatible properties
pattern
Sets the pattern within the marker.
"""
def __init__(
self, arg=None, colors=None, colorssrc=None, line=None, pattern=None, **kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnelarea.Marker`
colors
Sets the color of each sector. If not specified, the
default trace color set is used to pick the sector
colors.
colorssrc
Sets the source reference on Chart Studio Cloud for
`colors`.
line
:class:`plotly.graph_objects.funnelarea.marker.Line`
instance or dict with compatible properties
pattern
Sets the pattern within the marker.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnelarea.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("colors", arg, colors)
self._set_property("colorssrc", arg, colorssrc)
self._set_property("line", arg, line)
self._set_property("pattern", arg, pattern)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | networkx__networkx | networkx/generators/tests/test_nonisomorphic_trees.py | {
"start": 1075,
"end": 2945
} | class ____:
def test_tree_structure(self):
# test for tree structure for nx.nonisomorphic_trees()
def f(x):
return list(nx.nonisomorphic_trees(x))
for i in f(6):
assert nx.is_tree(i)
for i in f(8):
assert nx.is_tree(i)
def test_nonisomorphism(self):
# test for nonisomorphism of trees for nx.nonisomorphic_trees()
def f(x):
return list(nx.nonisomorphic_trees(x))
trees = f(6)
for i in range(len(trees)):
for j in range(i + 1, len(trees)):
assert not nx.is_isomorphic(trees[i], trees[j])
trees = f(8)
for i in range(len(trees)):
for j in range(i + 1, len(trees)):
assert not nx.is_isomorphic(trees[i], trees[j])
def test_number_of_nonisomorphic_trees(self):
# http://oeis.org/A000055
assert nx.number_of_nonisomorphic_trees(2) == 1
assert nx.number_of_nonisomorphic_trees(3) == 1
assert nx.number_of_nonisomorphic_trees(4) == 2
assert nx.number_of_nonisomorphic_trees(5) == 3
assert nx.number_of_nonisomorphic_trees(6) == 6
assert nx.number_of_nonisomorphic_trees(7) == 11
assert nx.number_of_nonisomorphic_trees(8) == 23
assert nx.number_of_nonisomorphic_trees(9) == 47
assert nx.number_of_nonisomorphic_trees(10) == 106
assert nx.number_of_nonisomorphic_trees(20) == 823065
assert nx.number_of_nonisomorphic_trees(30) == 14830871802
def test_nonisomorphic_trees(self):
def f(x):
return list(nx.nonisomorphic_trees(x))
assert edges_equal(f(3)[0].edges(), [(0, 1), (0, 2)])
assert edges_equal(f(4)[0].edges(), [(0, 1), (0, 3), (1, 2)])
assert edges_equal(f(4)[1].edges(), [(0, 1), (0, 2), (0, 3)])
| TestGeneratorNonIsomorphicTrees |
python | django__django | tests/staticfiles_tests/test_storage.py | {
"start": 28510,
"end": 31087
} | class ____(CollectionTestCase):
hashed_file_path = hashed_file_path
def test_module_import(self):
relpath = self.hashed_file_path("cached/module.js")
self.assertEqual(relpath, "cached/module.4326210cf0bd.js")
tests = [
# Relative imports.
b'import testConst from "./module_test.477bbebe77f0.js";',
b'import relativeModule from "../nested/js/nested.866475c46bb4.js";',
b'import { firstConst, secondConst } from "./module_test.477bbebe77f0.js";',
# Absolute import.
b'import rootConst from "/static/absolute_root.5586327fe78c.js";',
# Dynamic import.
b'const dynamicModule = import("./module_test.477bbebe77f0.js");',
# Creating a module object.
b'import * as NewModule from "./module_test.477bbebe77f0.js";',
# Creating a minified module object.
b'import*as m from "./module_test.477bbebe77f0.js";',
b'import* as m from "./module_test.477bbebe77f0.js";',
b'import *as m from "./module_test.477bbebe77f0.js";',
b'import* as m from "./module_test.477bbebe77f0.js";',
# Aliases.
b'import { testConst as alias } from "./module_test.477bbebe77f0.js";',
b"import {\n"
b" firstVar1 as firstVarAlias,\n"
b" $second_var_2 as secondVarAlias\n"
b'} from "./module_test.477bbebe77f0.js";',
]
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
for module_import in tests:
with self.subTest(module_import=module_import):
self.assertIn(module_import, content)
def test_aggregating_modules(self):
relpath = self.hashed_file_path("cached/module.js")
self.assertEqual(relpath, "cached/module.4326210cf0bd.js")
tests = [
b'export * from "./module_test.477bbebe77f0.js";',
b'export { testConst } from "./module_test.477bbebe77f0.js";',
b"export {\n"
b" firstVar as firstVarAlias,\n"
b" secondVar as secondVarAlias\n"
b'} from "./module_test.477bbebe77f0.js";',
]
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
for module_import in tests:
with self.subTest(module_import=module_import):
self.assertIn(module_import, content)
| TestCollectionJSModuleImportAggregationManifestStorage |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 8102,
"end": 8540
} | class ____(_Permission[AliasAction]):
alias: str
collection: str
def _to_weaviate(self) -> List[WeaviatePermission]:
return [
{
"action": action,
"aliases": {
"alias": _capitalize_first_letter(self.alias),
"collection": self.collection,
},
}
for action in self.actions
]
| _AliasPermission |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/return_statements.py | {
"start": 5964,
"end": 13067
} | class ____(converter.Base):
"""Lowers return statements into variables and conditionals.
Specifically, the following pattern:
<block 1>
return val
<block 2>
is converted to:
do_return = False
retval = None
<block 1>
do_return = True
retval = val
if not do_return:
<block 2>
return retval
The conversion adjusts loops as well:
<block 1>
while cond:
<block 2>
return retval
is converted to:
<block 1>
while not do_return and cond:
<block 2>
do_return = True
retval = val
"""
def __init__(self, ctx, allow_missing_return):
super(ReturnStatementsTransformer, self).__init__(ctx)
self.allow_missing_return = allow_missing_return
def visit_Return(self, node):
for block in reversed(self.state[_Block].stack):
block.return_used = True
block.create_guard_next = True
if block.is_function:
break
retval = node.value if node.value else parser.parse_expression('None')
# Note: If `return <expr> raises, then the return is aborted.
# The try-catch below ensures the variables remain consistent in that case.
template = """
try:
do_return_var_name = True
retval_var_name = retval
except:
do_return_var_name = False
raise
"""
node = templates.replace(
template,
do_return_var_name=self.state[_Function].do_return_var_name,
retval_var_name=self.state[_Function].retval_var_name,
retval=retval)
return node
def _postprocess_statement(self, node):
if not self.state[_Block].return_used:
return node, None
state = self.state[_Block]
if state.create_guard_now:
template = """
if not do_return_var_name:
original_node
"""
cond, = templates.replace(
template,
do_return_var_name=self.state[_Function].do_return_var_name,
original_node=node)
node, block = cond, cond.body
else:
node, block = node, None
state.create_guard_now = state.create_guard_next
state.create_guard_next = False
return node, block
def _visit_statement_block(self, node, nodes):
self.state[_Block].enter()
nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
self.state[_Block].exit()
return nodes
def visit_While(self, node):
node.test = self.visit(node.test)
# Add the check for return to the loop condition.
node.body = self._visit_statement_block(node, node.body)
if self.state[_Block].return_used:
node.test = templates.replace_as_expression(
'not control_var and test',
test=node.test,
control_var=self.state[_Function].do_return_var_name)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_For(self, node):
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
# Add the check for return to the loop condition.
node.body = self._visit_statement_block(node, node.body)
if self.state[_Block].return_used:
extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST, default=None)
if extra_test is not None:
extra_test = templates.replace_as_expression(
'not control_var and extra_test',
extra_test=extra_test,
control_var=self.state[_Function].do_return_var_name)
else:
extra_test = templates.replace_as_expression(
'not control_var',
control_var=self.state[_Function].do_return_var_name)
anno.setanno(node, anno.Basic.EXTRA_LOOP_TEST, extra_test)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_statement_block(node, node.body)
return node
def visit_Try(self, node):
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
node.finalbody = self._visit_statement_block(node, node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
node.body = self._visit_statement_block(node, node.body)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_FunctionDef(self, node):
with self.state[_Function] as fn:
with self.state[_Block] as block:
block.is_function = True
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
do_return_var_name = self.ctx.namer.new_symbol('do_return',
scope.referenced)
retval_var_name = self.ctx.namer.new_symbol('retval_', scope.referenced)
fn.do_return_var_name = do_return_var_name
fn.retval_var_name = retval_var_name
node.body = self._visit_statement_block(node, node.body)
if block.return_used:
if self.allow_missing_return:
# The function would have a single `with` node that wraps the
# entire body. If the function had a docstring, the body has two
# nodes, with the `with` as the second node.
wrapper_node = node.body[-1]
assert isinstance(wrapper_node, gast.With), (
'This transformer requires the functions converter.')
template = """
do_return_var_name = False
retval_var_name = ag__.UndefinedReturnValue()
body
return function_context.ret(retval_var_name, do_return_var_name)
"""
wrapper_node.body = templates.replace(
template,
body=wrapper_node.body,
do_return_var_name=do_return_var_name,
function_context=anno.getanno(node, 'function_context_name'),
retval_var_name=retval_var_name)
else:
template = """
body
return retval_var_name
"""
node.body = templates.replace(
template,
body=node.body,
do_return_var_name=do_return_var_name,
retval_var_name=retval_var_name)
return node
def transform(node, ctx, default_to_null_return=True):
"""Ensure a function has only a single return, at the end."""
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
# Note: Technically, these two could be merged into a single walk, but
# keeping them separate helps with readability.
node = ConditionalReturnRewriter(ctx).visit(node)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
transformer = ReturnStatementsTransformer(
ctx, allow_missing_return=default_to_null_return)
node = transformer.visit(node)
return node
| ReturnStatementsTransformer |
python | readthedocs__readthedocs.org | readthedocs/search/models.py | {
"start": 501,
"end": 3031
} | class ____(TimeStampedModel):
"""Information about the search queries."""
project = models.ForeignKey(
Project,
related_name="search_queries",
on_delete=models.CASCADE,
)
version = models.ForeignKey(
Version,
verbose_name=_("Version"),
related_name="search_queries",
on_delete=models.CASCADE,
)
query = models.CharField(
_("Query"),
max_length=4092,
)
total_results = models.IntegerField(
_("Total results"),
default=0,
)
objects = RelatedProjectQuerySet.as_manager()
class Meta:
verbose_name = "Search query"
verbose_name_plural = "Search queries"
indexes = [
models.Index(fields=["modified", "project", "version"]),
]
def __str__(self):
return self.query
@classmethod
def generate_queries_count_of_one_month(cls, project_slug):
"""
Returns the total queries performed each day of the last 30 days (including today).
Structure of returned data is compatible to make graphs.
Sample returned data::
{
'labels': ['01 Jul', '02 Jul', '03 Jul'],
'int_data': [150, 200, 143]
}
This data shows that there were 150 searches were made on 01 July,
200 searches on 02 July and 143 searches on 03 July.
"""
today = timezone.now().date()
last_30th_day = timezone.now().date() - timezone.timedelta(days=30)
qs = cls.objects.filter(
project__slug=project_slug,
created__date__lte=today,
created__date__gte=last_30th_day,
).order_by("-created")
# dict containing the total number of queries
# of each day for the past 30 days (if present in database).
count_dict = dict(
qs.annotate(created_date=TruncDate("created"))
.values("created_date")
.order_by("created_date")
.annotate(count=Count("id"))
.values_list("created_date", "count")
)
count_data = [count_dict.get(date) or 0 for date in _last_30_days_iter()]
# format the date value to a more readable form
# Eg. `16 Jul`
last_30_days_str = [
timezone.datetime.strftime(date, "%d %b") for date in _last_30_days_iter()
]
final_data = {
"labels": last_30_days_str,
"int_data": count_data,
}
return final_data
| SearchQuery |
python | mlflow__mlflow | dev/set_matrix.py | {
"start": 1348,
"end": 2169
} | class ____(OriginalVersion):
def __init__(self, version: str, release_date: datetime | None = None):
self._is_dev = version == DEV_VERSION
self._release_date = release_date
super().__init__(DEV_NUMERIC if self._is_dev else version)
def __str__(self):
return DEV_VERSION if self._is_dev else super().__str__()
@classmethod
def create_dev(cls):
return cls(DEV_VERSION, datetime.now(timezone.utc))
@property
def days_since_release(self) -> int | None:
"""
Compute the number of days since this version was released.
Returns None if release date is not available.
"""
if self._release_date is None:
return None
delta = datetime.now(timezone.utc) - self._release_date
return delta.days
| Version |
python | python-poetry__poetry | src/poetry/utils/env/generic_env.py | {
"start": 348,
"end": 3162
} | class ____(VirtualEnv):
def __init__(
self, path: Path, base: Path | None = None, child_env: Env | None = None
) -> None:
self._child_env = child_env
super().__init__(path, base=base)
def find_executables(self) -> None:
patterns = [("python*", "pip*")]
if self._child_env:
minor_version = (
f"{self._child_env.version_info[0]}.{self._child_env.version_info[1]}"
)
major_version = f"{self._child_env.version_info[0]}"
patterns = [
(f"python{minor_version}", f"pip{minor_version}"),
(f"python{major_version}", f"pip{major_version}"),
]
python_executable = None
pip_executable = None
for python_pattern, pip_pattern in patterns:
if python_executable and pip_executable:
break
if not python_executable:
python_executables = sorted(
p.name
for p in self._bin_dir.glob(python_pattern)
if re.match(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
)
if python_executables:
executable = python_executables[0]
if executable.endswith(".exe"):
executable = executable[:-4]
python_executable = executable
if not pip_executable:
pip_executables = sorted(
p.name
for p in self._bin_dir.glob(pip_pattern)
if re.match(r"pip(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
)
if pip_executables:
pip_executable = pip_executables[0]
if pip_executable.endswith(".exe"):
pip_executable = pip_executable[:-4]
if python_executable:
self._executable = python_executable
if pip_executable:
self._pip_executable = pip_executable
def get_paths(self) -> dict[str, str]:
output = self.run_python_script(GET_PATHS)
paths: dict[str, str] = json.loads(output)
return paths
def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
command = self.get_command_from_bin(bin) + list(args)
env = kwargs.pop("env", dict(os.environ))
if not self._is_windows:
return os.execvpe(command[0], command, env=env)
exe = subprocess.Popen(command, env=env, **kwargs)
exe.communicate()
return exe.returncode
def _run(self, cmd: list[str], **kwargs: Any) -> str:
return super(VirtualEnv, self)._run(cmd, **kwargs)
def is_venv(self) -> bool:
return self._path != self._base
| GenericEnv |
python | pytest-dev__pytest | testing/test_junitxml.py | {
"start": 5936,
"end": 35630
} | class ____:
@parametrize_families
def test_summing_simple(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
pytest.skip("")
@pytest.mark.xfail
def test_xfail():
assert 0
@pytest.mark.xfail
def test_xpass():
assert 1
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(name="pytest", errors=0, failures=1, skipped=2, tests=5)
@parametrize_families
def test_summing_simple_with_errors(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def fixture():
raise Exception()
def test_pass():
pass
def test_fail():
assert 0
def test_error(fixture):
pass
@pytest.mark.xfail
def test_xfail():
assert False
@pytest.mark.xfail(strict=True)
def test_xpass():
assert True
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(name="pytest", errors=1, failures=2, skipped=1, tests=5)
@parametrize_families
def test_hostname_in_xml(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
def test_pass():
pass
"""
)
_result, dom = run_and_parse(family=xunit_family)
node = dom.get_first_by_tag("testsuite")
node.assert_attr(hostname=platform.node())
@parametrize_families
def test_timestamp_in_xml(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
def test_pass():
pass
"""
)
start_time = datetime.now(timezone.utc)
_result, dom = run_and_parse(family=xunit_family)
node = dom.get_first_by_tag("testsuite")
timestamp = datetime.fromisoformat(node["timestamp"])
assert start_time <= timestamp < datetime.now(timezone.utc)
def test_timing_function(
self,
pytester: Pytester,
run_and_parse: RunAndParse,
mock_timing: _pytest.timing.MockTiming,
) -> None:
pytester.makepyfile(
"""
from _pytest import timing
def setup_module():
timing.sleep(1)
def teardown_module():
timing.sleep(2)
def test_sleep():
timing.sleep(4)
"""
)
_result, dom = run_and_parse()
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
val = tnode["time"]
assert val is not None
assert float(val) == 7.0
@pytest.mark.parametrize("duration_report", ["call", "total"])
def test_junit_duration_report(
self,
pytester: Pytester,
monkeypatch: MonkeyPatch,
duration_report: str,
run_and_parse: RunAndParse,
) -> None:
# mock LogXML.node_reporter so it always sets a known duration to each test report object
original_node_reporter = LogXML.node_reporter
def node_reporter_wrapper(s: Any, report: TestReport) -> Any:
report.duration = 1.0
reporter = original_node_reporter(s, report)
return reporter
monkeypatch.setattr(LogXML, "node_reporter", node_reporter_wrapper)
pytester.makepyfile(
"""
def test_foo():
pass
"""
)
_result, dom = run_and_parse("-o", f"junit_duration_report={duration_report}")
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
val = float(tnode["time"])
if duration_report == "total":
assert val == 3.0
else:
assert duration_report == "call"
assert val == 1.0
@parametrize_families
def test_setup_error(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg(request):
raise ValueError("Error reason")
def test_function(arg):
pass
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(errors=1, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_setup_error", name="test_function")
fnode = tnode.get_first_by_tag("error")
fnode.assert_attr(message='failed on setup with "ValueError: Error reason"')
assert "ValueError" in fnode.toxml()
@parametrize_families
def test_teardown_error(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
yield
raise ValueError('Error reason')
def test_function(arg):
pass
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_teardown_error", name="test_function")
fnode = tnode.get_first_by_tag("error")
fnode.assert_attr(message='failed on teardown with "ValueError: Error reason"')
assert "ValueError" in fnode.toxml()
@parametrize_families
def test_call_failure_teardown_error(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
yield
raise Exception("Teardown Exception")
def test_function(arg):
raise Exception("Call Exception")
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(errors=1, failures=1, tests=1)
first, second = dom.find_by_tag("testcase")
assert first
assert second
assert first != second
fnode = first.get_first_by_tag("failure")
fnode.assert_attr(message="Exception: Call Exception")
snode = second.get_first_by_tag("error")
snode.assert_attr(
message='failed on teardown with "Exception: Teardown Exception"'
)
@parametrize_families
def test_skip_contains_name_reason(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
def test_skip():
pytest.skip("hello23")
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_skip_contains_name_reason", name="test_skip")
snode = tnode.get_first_by_tag("skipped")
snode.assert_attr(type="pytest.skip", message="hello23")
@parametrize_families
def test_mark_skip_contains_name_reason(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip(reason="hello24")
def test_skip():
assert True
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(
classname="test_mark_skip_contains_name_reason", name="test_skip"
)
snode = tnode.get_first_by_tag("skipped")
snode.assert_attr(type="pytest.skip", message="hello24")
@parametrize_families
def test_mark_skipif_contains_name_reason(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
GLOBAL_CONDITION = True
@pytest.mark.skipif(GLOBAL_CONDITION, reason="hello25")
def test_skip():
assert True
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(
classname="test_mark_skipif_contains_name_reason", name="test_skip"
)
snode = tnode.get_first_by_tag("skipped")
snode.assert_attr(type="pytest.skip", message="hello25")
@parametrize_families
def test_mark_skip_doesnt_capture_output(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip(reason="foo")
def test_skip():
print("bar!")
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node_xml = dom.get_first_by_tag("testsuite").toxml()
assert "bar!" not in node_xml
@parametrize_families
def test_classname_instance(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
class TestClass(object):
def test_method(self):
assert 0
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(failures=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(
classname="test_classname_instance.TestClass", name="test_method"
)
@parametrize_families
def test_classname_nested_dir(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
p = pytester.mkdir("sub").joinpath("test_hello.py")
p.write_text("def test_func(): 0/0", encoding="utf-8")
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(failures=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="sub.test_hello", name="test_func")
@parametrize_families
def test_internal_error(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makeconftest("def pytest_runtest_protocol(): 0 / 0")
pytester.makepyfile("def test_function(): pass")
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(errors=1, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="pytest", name="internal")
fnode = tnode.get_first_by_tag("error")
fnode.assert_attr(message="internal error")
assert "Division" in fnode.toxml()
@pytest.mark.parametrize(
"junit_logging", ["no", "log", "system-out", "system-err", "out-err", "all"]
)
@parametrize_families
def test_failure_function(
self,
pytester: Pytester,
junit_logging: str,
run_and_parse: RunAndParse,
xunit_family: str,
) -> None:
pytester.makepyfile(
"""
import logging
import sys
def test_fail():
print("hello-stdout")
sys.stderr.write("hello-stderr\\n")
logging.info('info msg')
logging.warning('warning msg')
raise ValueError(42)
"""
)
result, dom = run_and_parse(
"-o", f"junit_logging={junit_logging}", family=xunit_family
)
assert result.ret, "Expected ret > 0"
node = dom.get_first_by_tag("testsuite")
node.assert_attr(failures=1, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_failure_function", name="test_fail")
fnode = tnode.get_first_by_tag("failure")
fnode.assert_attr(message="ValueError: 42")
assert "ValueError" in fnode.toxml(), "ValueError not included"
if junit_logging in ["log", "all"]:
logdata = tnode.get_first_by_tag("system-out")
log_xml = logdata.toxml()
assert logdata.tag == "system-out", "Expected tag: system-out"
assert "info msg" not in log_xml, "Unexpected INFO message"
assert "warning msg" in log_xml, "Missing WARN message"
if junit_logging in ["system-out", "out-err", "all"]:
systemout = tnode.get_first_by_tag("system-out")
systemout_xml = systemout.toxml()
assert systemout.tag == "system-out", "Expected tag: system-out"
assert "info msg" not in systemout_xml, "INFO message found in system-out"
assert "hello-stdout" in systemout_xml, (
"Missing 'hello-stdout' in system-out"
)
if junit_logging in ["system-err", "out-err", "all"]:
systemerr = tnode.get_first_by_tag("system-err")
systemerr_xml = systemerr.toxml()
assert systemerr.tag == "system-err", "Expected tag: system-err"
assert "info msg" not in systemerr_xml, "INFO message found in system-err"
assert "hello-stderr" in systemerr_xml, (
"Missing 'hello-stderr' in system-err"
)
assert "warning msg" not in systemerr_xml, (
"WARN message found in system-err"
)
if junit_logging == "no":
assert not tnode.find_by_tag("log"), "Found unexpected content: log"
assert not tnode.find_by_tag("system-out"), (
"Found unexpected content: system-out"
)
assert not tnode.find_by_tag("system-err"), (
"Found unexpected content: system-err"
)
@parametrize_families
def test_failure_verbose_message(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import sys
def test_fail():
assert 0, "An error"
"""
)
_result, dom = run_and_parse(family=xunit_family)
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
fnode = tnode.get_first_by_tag("failure")
fnode.assert_attr(message="AssertionError: An error\nassert 0")
@parametrize_families
def test_failure_escape(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('arg1', "<&'", ids="<&'")
def test_func(arg1):
print(arg1)
assert 0
"""
)
result, dom = run_and_parse(
"-o", "junit_logging=system-out", family=xunit_family
)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(failures=3, tests=3)
tnodes = node.find_by_tag("testcase")
for tnode, char in zip(tnodes, "<&'", strict=True):
tnode.assert_attr(
classname="test_failure_escape", name=f"test_func[{char}]"
)
sysout = tnode.get_first_by_tag("system-out")
text = sysout.text
assert f"{char}\n" in text
@parametrize_families
def test_junit_prefixing(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
def test_func():
assert 0
class TestHello(object):
def test_hello(self):
pass
"""
)
result, dom = run_and_parse("--junitprefix=xyz", family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(failures=1, tests=2)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="xyz.test_junit_prefixing", name="test_func")
tnode = node.find_by_tag("testcase")[1]
tnode.assert_attr(
classname="xyz.test_junit_prefixing.TestHello", name="test_hello"
)
@parametrize_families
def test_xfailure_function(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
def test_xfail():
pytest.xfail("42")
"""
)
result, dom = run_and_parse(family=xunit_family)
assert not result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=1, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_xfailure_function", name="test_xfail")
fnode = tnode.get_first_by_tag("skipped")
fnode.assert_attr(type="pytest.xfail", message="42")
@parametrize_families
def test_xfailure_marker(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(reason="42")
def test_xfail():
assert False
"""
)
result, dom = run_and_parse(family=xunit_family)
assert not result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=1, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_xfailure_marker", name="test_xfail")
fnode = tnode.get_first_by_tag("skipped")
fnode.assert_attr(type="pytest.xfail", message="42")
@pytest.mark.parametrize(
"junit_logging", ["no", "log", "system-out", "system-err", "out-err", "all"]
)
def test_xfail_captures_output_once(
self, pytester: Pytester, junit_logging: str, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
import sys
import pytest
@pytest.mark.xfail()
def test_fail():
sys.stdout.write('XFAIL This is stdout')
sys.stderr.write('XFAIL This is stderr')
assert 0
"""
)
_result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
has_err_logging = junit_logging in ["system-err", "out-err", "all"]
expected_err_output_len = 1 if has_err_logging else 0
assert len(tnode.find_by_tag("system-err")) == expected_err_output_len
has_out_logigng = junit_logging in ("log", "system-out", "out-err", "all")
expected_out_output_len = 1 if has_out_logigng else 0
assert len(tnode.find_by_tag("system-out")) == expected_out_output_len
@parametrize_families
def test_xfailure_xpass(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail
def test_xpass():
pass
"""
)
_result, dom = run_and_parse(family=xunit_family)
# assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=0, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_xfailure_xpass", name="test_xpass")
@parametrize_families
def test_xfailure_xpass_strict(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(strict=True, reason="This needs to fail!")
def test_xpass():
pass
"""
)
_result, dom = run_and_parse(family=xunit_family)
# assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(skipped=0, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(classname="test_xfailure_xpass_strict", name="test_xpass")
fnode = tnode.get_first_by_tag("failure")
fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")
@parametrize_families
def test_collect_error(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile("syntax error")
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(errors=1, tests=1)
tnode = node.get_first_by_tag("testcase")
fnode = tnode.get_first_by_tag("error")
fnode.assert_attr(message="collection failure")
assert "SyntaxError" in fnode.toxml()
def test_unicode(self, pytester: Pytester, run_and_parse: RunAndParse) -> None:
value = "hx\xc4\x85\xc4\x87\n"
pytester.makepyfile(
f"""\
# coding: latin1
def test_hello():
print({value!r})
assert 0
"""
)
result, dom = run_and_parse()
assert result.ret == 1
tnode = dom.get_first_by_tag("testcase")
fnode = tnode.get_first_by_tag("failure")
assert "hx" in fnode.toxml()
def test_assertion_binchars(
self, pytester: Pytester, run_and_parse: RunAndParse
) -> None:
"""This test did fail when the escaping wasn't strict."""
pytester.makepyfile(
"""
M1 = '\x01\x02\x03\x04'
M2 = '\x01\x02\x03\x05'
def test_str_compare():
assert M1 == M2
"""
)
_result, dom = run_and_parse()
print(dom.toxml())
@pytest.mark.parametrize("junit_logging", ["no", "system-out"])
def test_pass_captures_stdout(
self, pytester: Pytester, run_and_parse: RunAndParse, junit_logging: str
) -> None:
pytester.makepyfile(
"""
def test_pass():
print('hello-stdout')
"""
)
_result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
node = dom.get_first_by_tag("testsuite")
pnode = node.get_first_by_tag("testcase")
if junit_logging == "no":
assert not node.find_by_tag("system-out"), (
"system-out should not be generated"
)
if junit_logging == "system-out":
systemout = pnode.get_first_by_tag("system-out")
assert "hello-stdout" in systemout.toxml(), (
"'hello-stdout' should be in system-out"
)
@pytest.mark.parametrize("junit_logging", ["no", "system-err"])
def test_pass_captures_stderr(
self, pytester: Pytester, run_and_parse: RunAndParse, junit_logging: str
) -> None:
pytester.makepyfile(
"""
import sys
def test_pass():
sys.stderr.write('hello-stderr')
"""
)
_result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
node = dom.get_first_by_tag("testsuite")
pnode = node.get_first_by_tag("testcase")
if junit_logging == "no":
assert not node.find_by_tag("system-err"), (
"system-err should not be generated"
)
if junit_logging == "system-err":
systemerr = pnode.get_first_by_tag("system-err")
assert "hello-stderr" in systemerr.toxml(), (
"'hello-stderr' should be in system-err"
)
@pytest.mark.parametrize("junit_logging", ["no", "system-out"])
def test_setup_error_captures_stdout(
self, pytester: Pytester, run_and_parse: RunAndParse, junit_logging: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg(request):
print('hello-stdout')
raise ValueError()
def test_function(arg):
pass
"""
)
_result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
node = dom.get_first_by_tag("testsuite")
pnode = node.get_first_by_tag("testcase")
if junit_logging == "no":
assert not node.find_by_tag("system-out"), (
"system-out should not be generated"
)
if junit_logging == "system-out":
systemout = pnode.get_first_by_tag("system-out")
assert "hello-stdout" in systemout.toxml(), (
"'hello-stdout' should be in system-out"
)
@pytest.mark.parametrize("junit_logging", ["no", "system-err"])
def test_setup_error_captures_stderr(
self, pytester: Pytester, run_and_parse: RunAndParse, junit_logging: str
) -> None:
pytester.makepyfile(
"""
import sys
import pytest
@pytest.fixture
def arg(request):
sys.stderr.write('hello-stderr')
raise ValueError()
def test_function(arg):
pass
"""
)
_result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
node = dom.get_first_by_tag("testsuite")
pnode = node.get_first_by_tag("testcase")
if junit_logging == "no":
assert not node.find_by_tag("system-err"), (
"system-err should not be generated"
)
if junit_logging == "system-err":
systemerr = pnode.get_first_by_tag("system-err")
assert "hello-stderr" in systemerr.toxml(), (
"'hello-stderr' should be in system-err"
)
@pytest.mark.parametrize("junit_logging", ["no", "system-out"])
def test_avoid_double_stdout(
self, pytester: Pytester, run_and_parse: RunAndParse, junit_logging: str
) -> None:
pytester.makepyfile(
"""
import sys
import pytest
@pytest.fixture
def arg(request):
yield
sys.stdout.write('hello-stdout teardown')
raise ValueError()
def test_function(arg):
sys.stdout.write('hello-stdout call')
"""
)
_result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
node = dom.get_first_by_tag("testsuite")
pnode = node.get_first_by_tag("testcase")
if junit_logging == "no":
assert not node.find_by_tag("system-out"), (
"system-out should not be generated"
)
if junit_logging == "system-out":
systemout = pnode.get_first_by_tag("system-out")
assert "hello-stdout call" in systemout.toxml()
assert "hello-stdout teardown" in systemout.toxml()
def test_mangle_test_address() -> None:
from _pytest.junitxml import mangle_test_address
address = "::".join(["a/my.py.thing.py", "Class", "method", "[a-1-::]"])
newnames = mangle_test_address(address)
assert newnames == ["a.my.py.thing", "Class", "method", "[a-1-::]"]
def test_dont_configure_on_workers(tmp_path: Path) -> None:
gotten: list[object] = []
class FakeConfig:
if TYPE_CHECKING:
workerinput = None
def __init__(self) -> None:
self.pluginmanager = self
self.option = self
self.stash = Stash()
def getini(self, name: str) -> str:
return "pytest"
junitprefix = None
# XXX: shouldn't need tmp_path ?
xmlpath = str(tmp_path.joinpath("junix.xml"))
register = gotten.append
fake_config = cast(Config, FakeConfig())
from _pytest import junitxml
junitxml.pytest_configure(fake_config)
assert len(gotten) == 1
FakeConfig.workerinput = None
junitxml.pytest_configure(fake_config)
assert len(gotten) == 1
| TestPython |
python | django__django | tests/model_forms/test_uuid.py | {
"start": 242,
"end": 1501
} | class ____(TestCase):
def test_create_save_error(self):
form = UUIDPKForm({})
self.assertFalse(form.is_valid())
msg = "The UUIDPK could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
form.save()
def test_update_save_error(self):
obj = UUIDPK.objects.create(name="foo")
form = UUIDPKForm({}, instance=obj)
self.assertFalse(form.is_valid())
msg = "The UUIDPK could not be changed because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
form.save()
def test_model_multiple_choice_field_uuid_pk(self):
f = forms.ModelMultipleChoiceField(UUIDPK.objects.all())
with self.assertRaisesMessage(
ValidationError, "“invalid_uuid” is not a valid value."
):
f.clean(["invalid_uuid"])
def test_model_choice_invalid_pk_value_error_messages(self):
f = forms.ModelChoiceField(UUIDPK.objects.all())
with self.assertRaisesMessage(
ValidationError,
"['Select a valid choice. "
"That choice is not one of the available choices.']",
):
f.clean("invalid")
| ModelFormBaseTest |
python | mlflow__mlflow | mlflow/store/model_registry/dbmodels/models.py | {
"start": 5738,
"end": 6631
} | class ____(Base):
__tablename__ = "registered_model_aliases"
name = Column(
String(256),
ForeignKey(
"registered_models.name",
onupdate="cascade",
ondelete="cascade",
name="registered_model_alias_name_fkey",
),
)
alias = Column(String(256), nullable=False)
version = Column(Integer, nullable=False)
# linked entities
registered_model = relationship(
"SqlRegisteredModel", backref=backref("registered_model_aliases", cascade="all")
)
__table_args__ = (PrimaryKeyConstraint("name", "alias", name="registered_model_alias_pk"),)
def __repr__(self):
return f"<SqlRegisteredModelAlias ({self.name}, {self.alias}, {self.version})>"
# entity mappers
def to_mlflow_entity(self):
return RegisteredModelAlias(self.alias, self.version)
| SqlRegisteredModelAlias |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/tests/test_new_features.py | {
"start": 6429,
"end": 10970
} | class ____:
"""Test event system functionality."""
def test_event_system_subscription_and_notification(self):
"""Test that event system can handle event subscriptions and notifications."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
events_received = []
class TestEventHandler(BaseEventHandler):
def handle(self, event):
events_received.append(event)
class PageEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(event, PageDataFetchStartedEvent):
events_received.append(f"PAGE_EVENT: {event.page_id}")
# Subscribe to events using new event system
dispatcher = get_dispatcher("test_new_features_subscription")
general_handler = TestEventHandler()
page_handler = PageEventHandler()
dispatcher.add_event_handler(general_handler)
dispatcher.add_event_handler(page_handler)
# Create and emit a page event
page_event = PageDataFetchStartedEvent(page_id="test_page")
dispatcher.event(page_event)
# Check that both handlers received the event
assert len(events_received) == 2
assert "PAGE_EVENT: test_page" in events_received
assert any(
isinstance(event, PageDataFetchStartedEvent) for event in events_received
)
# Clean up
for handler in [general_handler, page_handler]:
if handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(handler)
def test_event_system_attachment_events(self):
"""Test event system with attachment events."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
attachment_events = []
class AttachmentEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(event, (AttachmentProcessedEvent, AttachmentFailedEvent)):
attachment_events.append(event)
dispatcher = get_dispatcher("test_new_features_attachment")
attachment_handler = AttachmentEventHandler()
dispatcher.add_event_handler(attachment_handler)
# Test attachment processed event
processed_event = AttachmentProcessedEvent(
page_id="page123",
attachment_id="att456",
attachment_name="document.pdf",
attachment_type=FileType.PDF,
attachment_size=1024,
attachment_link="http://example.com/att456",
)
dispatcher.event(processed_event)
# Test attachment failed event
failed_event = AttachmentFailedEvent(
page_id="page123",
attachment_id="att789",
attachment_name="broken.pdf",
attachment_type=FileType.PDF,
attachment_size=2048,
attachment_link="http://example.com/att789",
error="Processing failed",
)
dispatcher.event(failed_event)
assert len(attachment_events) == 2
assert any(
isinstance(event, AttachmentProcessedEvent) for event in attachment_events
)
assert any(
isinstance(event, AttachmentFailedEvent) for event in attachment_events
)
# Clean up
if attachment_handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(attachment_handler)
def test_event_system_handler_removal(self):
"""Test event system handler removal functionality."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
events_received = []
class TestEventHandler(BaseEventHandler):
def handle(self, event):
events_received.append(event)
# Add and then remove event handler
dispatcher = get_dispatcher("test_new_features_removal")
event_handler = TestEventHandler()
dispatcher.add_event_handler(event_handler)
if event_handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(event_handler)
# Create and emit event
page_event = PageDataFetchStartedEvent(page_id="test_page")
dispatcher.event(page_event)
# Should not receive any events since we removed the handler
assert len(events_received) == 0
| TestEventSystem |
python | getsentry__sentry | tests/sentry/workflow_engine/test_task.py | {
"start": 4950,
"end": 16158
} | class ____(TestCase):
def setUp(self) -> None:
self.group = self.create_group(project=self.project, type=MetricIssue.type_id)
self.activity = Activity(
project=self.project,
group=self.group,
type=ActivityType.SET_RESOLVED.value,
data={"fingerprint": ["test_fingerprint"]},
)
self.activity.save()
self.detector = self.create_detector(type=MetricIssue.slug)
@mock.patch("sentry.workflow_engine.tasks.workflows.logger")
def test_process_workflow_activity__no_workflows(self, mock_logger) -> None:
with mock.patch(
"sentry.workflow_engine.processors.workflow.evaluate_workflow_triggers",
return_value=set(),
) as mock_evaluate:
process_workflow_activity(
activity_id=self.activity.id,
group_id=self.group.id,
detector_id=self.detector.id,
)
# Short-circuit evaluation, no workflows associated
assert mock_evaluate.call_count == 0
mock_logger.info.assert_called_once_with(
"workflow_engine.process_workflows.evaluation.workflows.not_triggered",
extra={
"workflow_ids": None,
"detection_type": self.detector.type,
"event_id": None,
"group_id": self.activity.group.id,
"action_filter_group_ids": [],
"triggered_action_ids": [],
"triggered_workflow_ids": [],
"delayed_conditions": None,
"debug_msg": "No workflows are associated with the detector in the event",
},
)
@mock.patch(
"sentry.workflow_engine.processors.workflow.evaluate_workflow_triggers",
return_value=(set(), {}),
)
@mock.patch(
"sentry.workflow_engine.processors.workflow.evaluate_workflows_action_filters",
return_value=set(),
)
@mock.patch("sentry.workflow_engine.tasks.workflows.logger")
def test_process_workflow_activity__workflows__no_actions(
self, mock_logger, mock_eval_actions, mock_evaluate
):
self.workflow = self.create_workflow(organization=self.organization)
self.create_detector_workflow(
detector=self.detector,
workflow=self.workflow,
)
process_workflow_activity(
activity_id=self.activity.id,
group_id=self.group.id,
detector_id=self.detector.id,
)
event_data = WorkflowEventData(
event=self.activity,
group=self.group,
)
mock_evaluate.assert_called_once_with({self.workflow}, event_data, mock.ANY)
assert mock_eval_actions.call_count == 0
mock_logger.info.assert_called_once_with(
"workflow_engine.process_workflows.evaluation.workflows.triggered",
extra={
"workflow_ids": [self.workflow.id],
"detection_type": self.detector.type,
"group_id": self.activity.group.id,
"event_id": None,
"action_filter_group_ids": [],
"triggered_action_ids": [],
"triggered_workflow_ids": [],
"delayed_conditions": None,
"debug_msg": "No items were triggered or queued for slow evaluation",
},
)
@mock.patch("sentry.workflow_engine.processors.action.filter_recently_fired_workflow_actions")
@mock.patch("sentry.workflow_engine.tasks.workflows.logger")
def test_process_workflow_activity(
self, mock_logger, mock_filter_actions: mock.MagicMock
) -> None:
self.workflow = self.create_workflow(organization=self.organization)
self.action_group = self.create_data_condition_group(logic_type="any-short")
self.action = self.create_action()
self.create_data_condition_group_action(
condition_group=self.action_group,
action=self.action,
)
self.create_workflow_data_condition_group(self.workflow, self.action_group)
self.create_detector_workflow(
detector=self.detector,
workflow=self.workflow,
)
expected_event_data = WorkflowEventData(
event=self.activity,
group=self.group,
)
process_workflow_activity(
activity_id=self.activity.id,
group_id=self.group.id,
detector_id=self.detector.id,
)
mock_filter_actions.assert_called_once_with({self.action_group}, expected_event_data)
@mock.patch("sentry.workflow_engine.processors.workflow.evaluate_workflow_triggers")
@mock.patch("sentry.workflow_engine.tasks.workflows.logger")
def test_process_workflow_activity__success_logs(
self, mock_logger, mock_evaluate_workflow_triggers
) -> None:
self.workflow = self.create_workflow(organization=self.organization)
# Add additional data to ensure logs work as expected
self.workflow.when_condition_group = self.create_data_condition_group()
self.create_data_condition(condition_group=self.workflow.when_condition_group)
self.workflow.save()
self.action_group = self.create_data_condition_group(logic_type="any-short")
self.action = self.create_action()
self.create_data_condition_group_action(
condition_group=self.action_group,
action=self.action,
)
self.create_workflow_data_condition_group(self.workflow, self.action_group)
self.create_detector_workflow(
detector=self.detector,
workflow=self.workflow,
)
mock_evaluate_workflow_triggers.return_value = ({self.workflow}, {})
process_workflow_activity(
activity_id=self.activity.id,
group_id=self.group.id,
detector_id=self.detector.id,
)
mock_logger.info.assert_called_once_with(
"workflow_engine.process_workflows.evaluation.actions.triggered",
extra={
"workflow_ids": [self.workflow.id],
"detection_type": self.detector.type,
"group_id": self.activity.group.id,
"event_id": None,
"action_filter_group_ids": [self.action_group.id],
"triggered_action_ids": [self.action.id],
"triggered_workflow_ids": [self.workflow.id],
"delayed_conditions": None,
"debug_msg": None,
},
)
@mock.patch(
"sentry.workflow_engine.models.incident_groupopenperiod.update_incident_based_on_open_period_status_change"
) # rollout code that is independently tested
@mock.patch("sentry.workflow_engine.tasks.workflows.metrics.incr")
def test__e2e__issue_plat_to_processed(
self, mock_incr: mock.MagicMock, mock_update_igop: mock.MagicMock
) -> None:
self.message = StatusChangeMessageData(
id="test-id",
fingerprint=["group-1"],
project_id=self.project.id,
new_status=GroupStatus.RESOLVED,
new_substatus=None,
detector_id=self.detector.id,
activity_data={},
)
with self.tasks():
update_status(self.group, self.message)
# Issue platform is forwarding the activity update
mock_incr.assert_any_call(
"workflow_engine.issue_platform.status_change_handler",
amount=1,
tags={"activity_type": self.activity.type},
sample_rate=1.0,
)
# Workflow engine is correctly registered for the activity update
mock_incr.assert_any_call(
"workflow_engine.tasks.process_workflows.activity_update",
tags={"activity_type": self.activity.type},
)
# Workflow engine evaluated activity update in process_workflows
mock_incr.assert_any_call(
"workflow_engine.tasks.process_workflows.activity_update.executed",
tags={
"activity_type": self.activity.type,
"detector_type": self.detector.type,
},
sample_rate=1.0,
)
@mock.patch("sentry.issues.status_change_consumer.get_group_from_fingerprint")
@mock.patch(
"sentry.workflow_engine.models.incident_groupopenperiod.update_incident_based_on_open_period_status_change"
) # rollout code that is independently tested
@mock.patch("sentry.workflow_engine.tasks.workflows.metrics.incr")
def test__e2e__issue_plat_to_processed_activity_data_is_set(
self,
mock_incr: mock.MagicMock,
mock_update_igop: mock.MagicMock,
mock_get_group_from_fingerprint: mock.MagicMock,
) -> None:
mock_get_group_from_fingerprint.return_value = self.group
self.message = StatusChangeMessageData(
id="test-id",
fingerprint=["test-fingerprint"],
project_id=self.project.id,
new_status=GroupStatus.RESOLVED,
new_substatus=None,
detector_id=self.detector.id,
activity_data={"test": "test"},
)
with (
self.tasks(),
sentry_sdk.start_transaction(
op="process_status_change_message",
name="issues.status_change_consumer",
) as txn,
):
process_status_change_message(self.message, txn)
# Issue platform is forwarding the activity update
mock_incr.assert_any_call(
"workflow_engine.issue_platform.status_change_handler",
amount=1,
tags={"activity_type": self.activity.type},
sample_rate=1.0,
)
# Workflow engine is correctly registered for the activity update
mock_incr.assert_any_call(
"workflow_engine.tasks.process_workflows.activity_update",
tags={"activity_type": self.activity.type},
)
# Workflow engine evaluated activity update in process_workflows
mock_incr.assert_any_call(
"workflow_engine.tasks.process_workflows.activity_update.executed",
tags={
"activity_type": self.activity.type,
"detector_type": self.detector.type,
},
sample_rate=1.0,
)
# Check that the activity data is correctly stored in the database and the data is populated correctly
with assume_test_silo_mode_of(Activity):
latest_activity = (
Activity.objects.filter(group_id=self.group.id, type=self.activity.type)
.order_by("-datetime")
.first()
)
assert latest_activity is not None
assert latest_activity.data == {
"test": "test",
}
| TestProcessWorkflowActivity |
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial001.py | {
"start": 608,
"end": 2434
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
teams: List[Team] = Relationship(back_populates="heroes", link_model=HeroTeamLink)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond",
secret_name="Dive Wilson",
teams=[team_z_force, team_preventers],
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
teams=[team_preventers],
)
hero_spider_boy = Hero(
name="Spider-Boy", secret_name="Pedro Parqueador", teams=[team_preventers]
)
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Deadpond:", hero_deadpond)
print("Deadpond teams:", hero_deadpond.teams)
print("Rusty-Man:", hero_rusty_man)
print("Rusty-Man Teams:", hero_rusty_man.teams)
print("Spider-Boy:", hero_spider_boy)
print("Spider-Boy Teams:", hero_spider_boy.teams)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
| Hero |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 16371,
"end": 16443
} | class ____(BaseGer):
blas_func = fblas.sger
dtype = float32
| TestSger |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 26997,
"end": 28402
} | class ____(MeanMetricWrapper):
"""Calculates how often predictions match binary labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
Standalone usage:
>>> m = tf.keras.metrics.BinaryAccuracy()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result().numpy()
0.75
>>> m.reset_state()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryAccuracy()])
```
"""
def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):
super(BinaryAccuracy, self).__init__(
binary_accuracy, name, dtype=dtype, threshold=threshold)
| BinaryAccuracy |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_batch.py | {
"start": 2977,
"end": 3858
} | class ____(BaseAwsLinksTestCase):
link_class = BatchJobQueueLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "us-east-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"job_queue_arn": "arn:fake:jq",
},
)
self.assert_extra_link_url(
expected_url=(
"https://console.aws.amazon.com/batch/home?region=us-east-1#queues/detail/arn:fake:jq"
),
region_name="us-east-1",
aws_partition="aws",
job_queue_arn="arn:fake:jq",
)
| TestBatchJobQueueLink |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/anno.py | {
"start": 1003,
"end": 1347
} | class ____(enum.Enum):
"""Base class for different types of AST annotations."""
def of(self, node, default=None):
return getanno(node, self, default=default)
def add_to(self, node, value):
setanno(node, self, value)
def exists(self, node):
return hasanno(node, self)
def __repr__(self):
return str(self.name)
| NoValue |
python | pytorch__pytorch | torchgen/local.py | {
"start": 768,
"end": 2167
} | class ____(threading.local):
use_const_ref_for_mutable_tensors: bool | None = None
use_ilistref_for_tensor_lists: bool | None = None
_locals = Locals()
def use_const_ref_for_mutable_tensors() -> bool:
assert _locals.use_const_ref_for_mutable_tensors is not None, (
"need to initialize local.use_const_ref_for_mutable_tensors with "
"local.parametrize"
)
return _locals.use_const_ref_for_mutable_tensors
def use_ilistref_for_tensor_lists() -> bool:
assert _locals.use_ilistref_for_tensor_lists is not None, (
"need to initialize local.use_ilistref_for_tensor_lists with local.parametrize"
)
return _locals.use_ilistref_for_tensor_lists
@contextmanager
def parametrize(
*, use_const_ref_for_mutable_tensors: bool, use_ilistref_for_tensor_lists: bool
) -> Iterator[None]:
old_use_const_ref_for_mutable_tensors = _locals.use_const_ref_for_mutable_tensors
old_use_ilistref_for_tensor_lists = _locals.use_ilistref_for_tensor_lists
try:
_locals.use_const_ref_for_mutable_tensors = use_const_ref_for_mutable_tensors
_locals.use_ilistref_for_tensor_lists = use_ilistref_for_tensor_lists
yield
finally:
_locals.use_const_ref_for_mutable_tensors = (
old_use_const_ref_for_mutable_tensors
)
_locals.use_ilistref_for_tensor_lists = old_use_ilistref_for_tensor_lists
| Locals |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 11717,
"end": 11926
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
uri: str
def document_uri(self) -> DocumentUri:
return DocumentUri.parse(self.uri)
@dataclasses.dataclass(frozen=True)
| TextDocumentIdentifier |
python | Lightning-AI__lightning | src/lightning/pytorch/strategies/parallel.py | {
"start": 1163,
"end": 5093
} | class ____(Strategy, ABC):
"""Strategy for training with multiple processes in parallel."""
def __init__(
self,
accelerator: Optional["pl.accelerators.Accelerator"] = None,
parallel_devices: Optional[list[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
checkpoint_io: Optional[CheckpointIO] = None,
precision_plugin: Optional[Precision] = None,
):
super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin)
self.parallel_devices = parallel_devices
self.cluster_environment: Optional[ClusterEnvironment] = cluster_environment
self._layer_sync: Optional[LayerSync] = None
@property
@abstractmethod
@override
def root_device(self) -> torch.device:
"""Return the root device."""
@property
def global_rank(self) -> int:
return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0
@property
def local_rank(self) -> int:
return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0
@property
def node_rank(self) -> int:
return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0
@property
def world_size(self) -> int:
return self.cluster_environment.world_size() if self.cluster_environment is not None else 1
@property
@override
def is_global_zero(self) -> bool:
return self.global_rank == 0
@property
def parallel_devices(self) -> Optional[list[torch.device]]:
return self._parallel_devices
@parallel_devices.setter
def parallel_devices(self, parallel_devices: Optional[list[torch.device]]) -> None:
self._parallel_devices = parallel_devices
@property
def distributed_sampler_kwargs(self) -> dict[str, Any]:
return {
"num_replicas": len(self.parallel_devices) if self.parallel_devices is not None else 0,
"rank": self.global_rank,
}
@override
def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor:
"""Perform a all_gather on all processes."""
return _all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)
@override
def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
"""Reduces a boolean decision over distributed processes. By default is analogous to ``all`` from the standard
library, returning ``True`` only if all input decisions evaluate to ``True``. If ``all`` is set to ``False``,
it behaves like ``any`` instead.
Args:
decision: A single input decision.
all: Whether to logically emulate ``all`` or ``any``. Defaults to True.
Returns:
bool: The reduced boolean decision.
"""
decision = torch.tensor(int(decision), device=self.root_device)
decision = self.reduce(
decision,
reduce_op=ReduceOp.SUM, # type: ignore[arg-type]
)
decision = bool(decision == self.world_size) if all else bool(decision)
return decision
@contextmanager
def block_backward_sync(self) -> Generator:
"""Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
Returns: context manager with sync behaviour off
"""
if isinstance(self.model, pl.utilities.types.DistributedDataParallel):
with self.model.no_sync():
yield None
else:
yield None
@override
def teardown(self) -> None:
assert self.cluster_environment is not None
self.cluster_environment.teardown()
super().teardown()
| ParallelStrategy |
python | doocs__leetcode | solution/1700-1799/1756.Design Most Recently Used Queue/Solution2.py | {
"start": 362,
"end": 937
} | class ____:
def __init__(self, n: int):
self.q = list(range(n + 1))
self.tree = BinaryIndexedTree(n + 2010)
def fetch(self, k: int) -> int:
l, r = 1, len(self.q)
while l < r:
mid = (l + r) >> 1
if mid - self.tree.query(mid) >= k:
r = mid
else:
l = mid + 1
x = self.q[l]
self.q.append(x)
self.tree.update(l, 1)
return x
# Your MRUQueue object will be instantiated and called as such:
# obj = MRUQueue(n)
# param_1 = obj.fetch(k)
| MRUQueue |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 13667,
"end": 13973
} | class ____(PydanticTypeError):
code = 'arbitrary_type'
msg_template = 'instance of {expected_arbitrary_type} expected'
def __init__(self, *, expected_arbitrary_type: Type[Any]) -> None:
super().__init__(expected_arbitrary_type=display_as_type(expected_arbitrary_type))
| ArbitraryTypeError |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_returned.py | {
"start": 685,
"end": 840
} | class ____:
""" __getnewargs__ returns an integer """
def __getnewargs__(self): # [invalid-getnewargs-returned]
return 1
| FirstBadGetNewArgs |
python | django__django | tests/model_fields/models.py | {
"start": 5236,
"end": 6681
} | class ____(models.Model):
id = models.AutoField("verbose pk", primary_key=True)
field1 = models.BigIntegerField("verbose field1")
field2 = models.BooleanField("verbose field2", default=False)
field3 = models.CharField("verbose field3", max_length=10)
field4 = models.DateField("verbose field4")
field5 = models.DateTimeField("verbose field5")
field6 = models.DecimalField("verbose field6", max_digits=6, decimal_places=1)
field7 = models.EmailField("verbose field7")
field8 = models.FileField(
"verbose field8", storage=temp_storage, upload_to="unused"
)
field9 = models.FilePathField("verbose field9")
field10 = models.FloatField("verbose field10")
# Don't want to depend on Pillow in this test
# field_image = models.ImageField("verbose field")
field11 = models.IntegerField("verbose field11")
field12 = models.GenericIPAddressField("verbose field12", protocol="ipv4")
field13 = models.PositiveIntegerField("verbose field13")
field14 = models.PositiveSmallIntegerField("verbose field14")
field15 = models.SlugField("verbose field15")
field16 = models.SmallIntegerField("verbose field16")
field17 = models.TextField("verbose field17")
field18 = models.TimeField("verbose field18")
field19 = models.URLField("verbose field19")
field20 = models.UUIDField("verbose field20")
field21 = models.DurationField("verbose field21")
| VerboseNameField |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 23406,
"end": 23588
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("DONE", "IN_PROGRESS", "TODO")
| ProjectColumnPurpose |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project_forms.py | {
"start": 36531,
"end": 39400
} | class ____(TestCase):
def setUp(self):
self.project = get(Project)
def test_webhookform(self):
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
"url": "http://www.example.com/",
"payload": "{}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
}
form = WebHookForm(data=data, project=self.project)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.webhook_notifications.all().count(), 1)
data = {
"url": "https://www.example.com/",
"payload": "{}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_PASSED).id],
}
form = WebHookForm(data=data, project=self.project)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.webhook_notifications.all().count(), 2)
def test_wrong_inputs_in_webhookform(self):
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
"url": "",
"payload": "{}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
}
form = WebHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {"url": ["This field is required."]})
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
"url": "wrong-url",
"payload": "{}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
}
form = WebHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {"url": ["Enter a valid URL."]})
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
"url": "https://example.com/webhook/",
"payload": "{wrong json object}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
}
form = WebHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(
form.errors, {"payload": ["The payload must be a valid JSON object."]}
)
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
data = {
"url": "https://example.com/webhook/",
"payload": "{}",
"events": [],
}
form = WebHookForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertDictEqual(form.errors, {"events": ["This field is required."]})
self.assertEqual(self.project.webhook_notifications.all().count(), 0)
| TestWebhookForm |
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/events.py | {
"start": 93,
"end": 154
} | class ____(BaseVoiceAgentEvent):
data: bytes
| AudioSentEvent |
python | jazzband__django-model-utils | tests/models.py | {
"start": 12725,
"end": 13046
} | class ____(models.Model):
custom_field = CustomDescriptorField()
tracked_custom_field = CustomDescriptorField()
regular_field = models.IntegerField()
tracked_regular_field = models.IntegerField()
tracker = FieldTracker(fields=['tracked_custom_field', 'tracked_regular_field'])
| ModelWithCustomDescriptor |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 38180,
"end": 40031
} | class ____(Response):
"""
Response of models.edit endpoint.
:param updated: Number of models updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "models"
_action = "edit"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of models updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| EditResponse |
python | huggingface__transformers | src/transformers/models/afmoe/modular_afmoe.py | {
"start": 1740,
"end": 1780
} | class ____(Qwen2MoeMLP):
pass
| AfmoeMLP |
python | Textualize__textual | docs/examples/how-to/center02.py | {
"start": 80,
"end": 440
} | class ____(App):
"""How to center things."""
CSS = """
Screen {
align: center middle;
}
#hello {
background: blue 50%;
border: wide white;
}
"""
def compose(self) -> ComposeResult:
yield Static("Hello, World!", id="hello")
if __name__ == "__main__":
app = CenterApp()
app.run()
| CenterApp |
python | django__django | tests/select_related/models.py | {
"start": 1879,
"end": 1999
} | class ____(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping)
| Pizza |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/configurable.py | {
"start": 1040,
"end": 9832
} | class ____(RunnableSerializable[Input, Output]):
"""Serializable `Runnable` that can be dynamically configured.
A `DynamicRunnable` should be initiated using the `configurable_fields` or
`configurable_alternatives` method of a `Runnable`.
"""
default: RunnableSerializable[Input, Output]
"""The default `Runnable` to use."""
config: RunnableConfig | None = None
"""The configuration to use."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@classmethod
@override
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "runnable"]`
"""
return ["langchain", "schema", "runnable"]
@property
@override
def InputType(self) -> type[Input]:
return self.default.InputType
@property
@override
def OutputType(self) -> type[Output]:
return self.default.OutputType
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
runnable, config = self.prepare(config)
return runnable.get_input_schema(config)
@override
def get_output_schema(
self, config: RunnableConfig | None = None
) -> type[BaseModel]:
runnable, config = self.prepare(config)
return runnable.get_output_schema(config)
@override
def get_graph(self, config: RunnableConfig | None = None) -> Graph:
runnable, config = self.prepare(config)
return runnable.get_graph(config)
@override
def with_config(
self,
config: RunnableConfig | None = None,
# Sadly Unpack is not well supported by mypy so this will have to be untyped
**kwargs: Any,
) -> Runnable[Input, Output]:
return self.__class__(
**{**self.__dict__, "config": ensure_config(merge_configs(config, kwargs))} # type: ignore[arg-type]
)
def prepare(
self, config: RunnableConfig | None = None
) -> tuple[Runnable[Input, Output], RunnableConfig]:
"""Prepare the `Runnable` for invocation.
Args:
config: The configuration to use.
Returns:
The prepared `Runnable` and configuration.
"""
runnable: Runnable[Input, Output] = self
while isinstance(runnable, DynamicRunnable):
runnable, config = runnable._prepare(merge_configs(runnable.config, config)) # noqa: SLF001
return runnable, cast("RunnableConfig", config)
@abstractmethod
def _prepare(
self, config: RunnableConfig | None = None
) -> tuple[Runnable[Input, Output], RunnableConfig]: ...
@override
def invoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
runnable, config = self.prepare(config)
return runnable.invoke(input, config, **kwargs)
@override
async def ainvoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
runnable, config = self.prepare(config)
return await runnable.ainvoke(input, config, **kwargs)
@override
def batch(
self,
inputs: list[Input],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Output]:
configs = get_config_list(config, len(inputs))
prepared = [self.prepare(c) for c in configs]
if all(p is self.default for p, _ in prepared):
return self.default.batch(
inputs,
[c for _, c in prepared],
return_exceptions=return_exceptions,
**kwargs,
)
if not inputs:
return []
def invoke(
prepared: tuple[Runnable[Input, Output], RunnableConfig],
input_: Input,
) -> Output | Exception:
bound, config = prepared
if return_exceptions:
try:
return bound.invoke(input_, config, **kwargs)
except Exception as e:
return e
else:
return bound.invoke(input_, config, **kwargs)
# If there's only one input, don't bother with the executor
if len(inputs) == 1:
return cast("list[Output]", [invoke(prepared[0], inputs[0])])
with get_executor_for_config(configs[0]) as executor:
return cast("list[Output]", list(executor.map(invoke, prepared, inputs)))
@override
async def abatch(
self,
inputs: list[Input],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Output]:
configs = get_config_list(config, len(inputs))
prepared = [self.prepare(c) for c in configs]
if all(p is self.default for p, _ in prepared):
return await self.default.abatch(
inputs,
[c for _, c in prepared],
return_exceptions=return_exceptions,
**kwargs,
)
if not inputs:
return []
async def ainvoke(
prepared: tuple[Runnable[Input, Output], RunnableConfig],
input_: Input,
) -> Output | Exception:
bound, config = prepared
if return_exceptions:
try:
return await bound.ainvoke(input_, config, **kwargs)
except Exception as e:
return e
else:
return await bound.ainvoke(input_, config, **kwargs)
coros = map(ainvoke, prepared, inputs)
return await gather_with_concurrency(configs[0].get("max_concurrency"), *coros)
@override
def stream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Output]:
runnable, config = self.prepare(config)
return runnable.stream(input, config, **kwargs)
@override
async def astream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Output]:
runnable, config = self.prepare(config)
async for chunk in runnable.astream(input, config, **kwargs):
yield chunk
@override
def transform(
self,
input: Iterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Output]:
runnable, config = self.prepare(config)
return runnable.transform(input, config, **kwargs)
@override
async def atransform(
self,
input: AsyncIterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Output]:
runnable, config = self.prepare(config)
async for chunk in runnable.atransform(input, config, **kwargs):
yield chunk
@override
def __getattr__(self, name: str) -> Any: # type: ignore[misc]
attr = getattr(self.default, name)
if callable(attr):
@wraps(attr)
def wrapper(*args: Any, **kwargs: Any) -> Any:
for key, arg in kwargs.items():
if key == "config" and (
isinstance(arg, dict)
and "configurable" in arg
and isinstance(arg["configurable"], dict)
):
runnable, config = self.prepare(cast("RunnableConfig", arg))
kwargs = {**kwargs, "config": config}
return getattr(runnable, name)(*args, **kwargs)
for idx, arg in enumerate(args):
if (
isinstance(arg, dict)
and "configurable" in arg
and isinstance(arg["configurable"], dict)
):
runnable, config = self.prepare(cast("RunnableConfig", arg))
argsl = list(args)
argsl[idx] = config
return getattr(runnable, name)(*argsl, **kwargs)
if self.config:
runnable, config = self.prepare()
return getattr(runnable, name)(*args, **kwargs)
return attr(*args, **kwargs)
return wrapper
return attr
| DynamicRunnable |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_details.py | {
"start": 8438,
"end": 29370
} | class ____(AlertRuleDetailsBase):
def test_simple(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
with self.feature("organizations:incidents"):
resp = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert resp.data == serialize(self.alert_rule, serializer=DetailedAlertRuleSerializer())
def test_workflow_engine_serializer(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
ard = AlertRuleDetector.objects.get(alert_rule_id=self.alert_rule.id)
self.detector = Detector.objects.get(id=ard.detector_id)
with (
self.feature("organizations:incidents"),
self.feature("organizations:workflow-engine-rule-serializers"),
):
resp = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert resp.data == serialize(self.detector, serializer=WorkflowEngineDetectorSerializer())
def test_aggregate_translation(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
alert_rule = self.create_alert_rule(aggregate="count_unique(tags[sentry:user])")
with self.feature("organizations:incidents"):
resp = self.get_success_response(self.organization.slug, alert_rule.id)
assert resp.data["aggregate"] == "count_unique(user)"
assert alert_rule.snuba_query.aggregate == "count_unique(tags[sentry:user])"
def test_expand_latest_incident(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
incident = self.create_incident(
organization=self.organization,
title="Incident #1",
projects=[self.project],
alert_rule=self.alert_rule,
status=IncidentStatus.CRITICAL.value,
)
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, self.alert_rule.id, expand=["latestIncident"]
)
no_expand_resp = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert resp.data["latestIncident"] is not None
assert resp.data["latestIncident"]["id"] == str(incident.id)
assert "latestIncident" not in no_expand_resp.data
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
def test_static_detection_type(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
rule = self.create_alert_rule() # the default detection type is static
trigger = self.create_alert_rule_trigger(rule, "hi", 1000)
self.create_alert_rule_trigger_action(alert_rule_trigger=trigger)
resp = self.get_success_response(self.organization.slug, rule.id)
assert rule.detection_type == AlertRuleDetectionType.STATIC
assert rule.detection_type == resp.data.get("detectionType")
# Confirm that we don't mess up flow for customers who don't know about detection_type field yet
rule2 = self.create_alert_rule(comparison_delta=60)
trigger2 = self.create_alert_rule_trigger(rule, "heyo", 1000)
self.create_alert_rule_trigger_action(alert_rule_trigger=trigger2)
resp = self.get_success_response(self.organization.slug, rule2.id)
assert rule2.detection_type == AlertRuleDetectionType.PERCENT
assert rule2.detection_type == resp.data.get("detectionType")
with pytest.raises(
ValidationError,
match="Sensitivity is not a valid field for this alert type",
):
# STATIC detection types shouldn't have seasonality or sensitivity
self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO, sensitivity=AlertRuleSensitivity.HIGH
)
with pytest.raises(
ValidationError,
match="Above and below is not a valid threshold type for this alert type",
):
self.create_alert_rule(threshold_type=AlertRuleThresholdType.ABOVE_AND_BELOW)
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
def test_percent_detection_type(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
rule = self.create_alert_rule(
comparison_delta=60, detection_type=AlertRuleDetectionType.PERCENT
)
trigger = self.create_alert_rule_trigger(rule, "hi", 1000)
self.create_alert_rule_trigger_action(alert_rule_trigger=trigger)
resp = self.get_success_response(self.organization.slug, rule.id)
assert rule.detection_type == resp.data.get("detectionType")
with pytest.raises(
ValidationError, match="Percentage-based alerts require a comparison delta"
):
self.create_alert_rule(
detection_type=AlertRuleDetectionType.PERCENT
) # PERCENT detection type requires a comparison delta
with pytest.raises(
ValidationError,
match="Sensitivity is not a valid field for this alert type",
):
# PERCENT detection type should not have sensitivity or seasonality
self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO,
sensitivity=AlertRuleSensitivity.HIGH,
detection_type=AlertRuleDetectionType.PERCENT,
)
with pytest.raises(
ValidationError,
match="Above and below is not a valid threshold type for this alert type",
):
self.create_alert_rule(
threshold_type=AlertRuleThresholdType.ABOVE_AND_BELOW,
detection_type=AlertRuleDetectionType.PERCENT,
)
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_dynamic_detection_type(self, mock_seer_request: MagicMock) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
rule = self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO,
sensitivity=AlertRuleSensitivity.HIGH,
threshold_type=AlertRuleThresholdType.ABOVE_AND_BELOW,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
)
trigger = self.create_alert_rule_trigger(rule, "hi", 0)
self.create_alert_rule_trigger_action(alert_rule_trigger=trigger)
resp = self.get_success_response(self.organization.slug, rule.id)
assert rule.detection_type == resp.data.get("detectionType")
with pytest.raises(ValidationError, match="Dynamic alerts require a sensitivity level"):
self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
) # Require both seasonality and sensitivity
# TODO: uncomment this test when seasonality becomes a supported field
# with pytest.raises(
# ValidationError, match="Dynamic alerts require both sensitivity and seasonality"
# ):
# self.create_alert_rule(
# sensitivity=AlertRuleSensitivity.MEDIUM,
# detection_type=AlertRuleDetectionType.DYNAMIC,
# time_window=30,
# ) # Require both seasonality and sensitivity
with pytest.raises(ValidationError, match="Dynamic alerts require a sensitivity level"):
self.create_alert_rule(
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
) # DYNAMIC detection type requires seasonality and sensitivity
with pytest.raises(
ValidationError, match="Comparison delta is not a valid field for this alert type"
):
# DYNAMIC detection type should not have comparison delta
self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO,
sensitivity=AlertRuleSensitivity.HIGH,
comparison_delta=60,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
)
with pytest.raises(ValidationError, match="Invalid time window for dynamic alert"):
rule = self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO,
sensitivity=AlertRuleSensitivity.HIGH,
threshold_type=AlertRuleThresholdType.ABOVE_AND_BELOW,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=1,
)
with pytest.raises(
ValidationError, match="Dynamic alerts do not support 'is:unresolved' queries"
):
rule = self.create_alert_rule(
seasonality=AlertRuleSeasonality.AUTO,
sensitivity=AlertRuleSensitivity.HIGH,
threshold_type=AlertRuleThresholdType.ABOVE_AND_BELOW,
detection_type=AlertRuleDetectionType.DYNAMIC,
time_window=30,
query="is:unresolved",
)
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
def test_missing_threshold(self) -> None:
"""Test that we throw a validation error when the trigger is missing alertThreshold"""
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
data = deepcopy(self.dynamic_alert_rule_dict)
del data["triggers"][0]["alertThreshold"]
serializer = AlertRuleSerializer(
context={
"organization": self.organization,
"access": OrganizationGlobalAccess(self.organization, settings.SENTRY_SCOPES),
"user": self.user,
"installations": app_service.installations_for_organization(
organization_id=self.organization.id
),
},
data=data,
)
assert not serializer.is_valid(), serializer.errors
assert serializer.errors["nonFieldErrors"][0] == "Trigger must have an alertThreshold"
@responses.activate
def test_with_sentryapp_success(self) -> None:
self.superuser = self.create_user("admin@localhost", is_superuser=True)
self.login_as(user=self.superuser)
self.create_team(organization=self.organization, members=[self.superuser])
sentry_app = self.create_sentry_app(
organization=self.organization,
published=True,
verify_install=False,
name="Super Awesome App",
schema={"elements": [self.create_alert_rule_action_schema()]},
)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=self.organization, user=self.superuser
)
rule = self.create_alert_rule()
trigger = self.create_alert_rule_trigger(rule, "hi", 1000)
self.create_alert_rule_trigger_action(
alert_rule_trigger=trigger,
target_identifier=sentry_app.id,
type=AlertRuleTriggerAction.Type.SENTRY_APP,
target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
sentry_app=sentry_app,
sentry_app_config=[
{"name": "title", "value": "An alert"},
{"summary": "Something happened here..."},
{"name": "points", "value": "3"},
{"name": "assignee", "value": "Nisanthan"},
],
)
responses.add(
responses.GET,
"https://example.com/sentry/members",
json=[
{"value": "bob", "label": "Bob"},
{"value": "jess", "label": "Jess"},
],
status=200,
)
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, rule.id)
assert resp.status_code == 200
assert len(responses.calls) == 1
assert "errors" not in resp.data
action = resp.data["triggers"][0]["actions"][0]
assert "select" == action["formFields"]["optional_fields"][-1]["type"]
assert "sentry/members" in action["formFields"]["optional_fields"][-1]["uri"]
assert "bob" == action["formFields"]["optional_fields"][-1]["choices"][0][0]
@responses.activate
def test_with_sentryapp_multiple_installations_filters_by_org(self) -> None:
self.superuser = self.create_user("admin@localhost", is_superuser=True)
self.login_as(user=self.superuser)
self.create_team(organization=self.organization, members=[self.superuser])
org2 = self.create_organization(name="org2")
sentry_app = self.create_sentry_app(
organization=self.organization,
published=True,
verify_install=False,
name="Super Awesome App",
schema={"elements": [self.create_alert_rule_action_schema()]},
)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=self.organization, user=self.superuser
)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=org2, user=self.superuser
)
get_context_response = app_service.get_component_contexts(
filter=dict(app_ids=[sentry_app.id], organization_id=self.organization.id),
component_type="alert-rule-action",
)
rule = self.create_alert_rule()
trigger = self.create_alert_rule_trigger(rule, "hi", 1000)
self.create_alert_rule_trigger_action(
alert_rule_trigger=trigger,
target_identifier=sentry_app.id,
type=AlertRuleTriggerAction.Type.SENTRY_APP,
target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
sentry_app=sentry_app,
sentry_app_config=[
{"name": "title", "value": "An alert"},
{"summary": "Something happened here..."},
{"name": "points", "value": "3"},
{"name": "assignee", "value": "Nisanthan"},
],
)
responses.add(
responses.GET,
"https://example.com/sentry/members",
json=[
{"value": "bob", "label": "Bob"},
{"value": "jess", "label": "Jess"},
],
status=200,
)
with self.feature("organizations:incidents"):
with mock.patch.object(app_service, "get_component_contexts") as mock_get:
mock_get.return_value = get_context_response
resp = self.get_response(self.organization.slug, rule.id)
assert mock_get.call_count == 1
mock_get.assert_called_with(
filter={
"app_ids": [sentry_app.id],
"organization_id": self.organization.id,
},
component_type="alert-rule-action",
)
assert resp.status_code == 200
assert len(responses.calls) == 1
assert "errors" not in resp.data
action = resp.data["triggers"][0]["actions"][0]
assert "select" == action["formFields"]["optional_fields"][-1]["type"]
assert "sentry/members" in action["formFields"]["optional_fields"][-1]["uri"]
assert "bob" == action["formFields"]["optional_fields"][-1]["choices"][0][0]
@responses.activate
def test_with_unresponsive_sentryapp(self) -> None:
self.superuser = self.create_user("admin@localhost", is_superuser=True)
self.login_as(user=self.superuser)
self.create_team(organization=self.organization, members=[self.superuser])
self.sentry_app = self.create_sentry_app(
organization=self.organization,
published=True,
verify_install=False,
name="Super Awesome App",
schema={"elements": [self.create_alert_rule_action_schema()]},
)
self.installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization, user=self.superuser
)
self.rule = self.create_alert_rule()
trigger = self.create_alert_rule_trigger(self.rule, "hi", 1000)
self.create_alert_rule_trigger_action(
alert_rule_trigger=trigger,
target_identifier=self.sentry_app.id,
type=AlertRuleTriggerAction.Type.SENTRY_APP,
target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
sentry_app=self.sentry_app,
sentry_app_config=[
{"name": "title", "value": "An alert"},
{"summary": "Something happened here..."},
{"name": "points", "value": "3"},
{"name": "assignee", "value": "Nisanthan"},
],
)
responses.add(responses.GET, "http://example.com/sentry/members", json={}, status=404)
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, self.rule.id)
assert resp.status_code == 200
# Returns errors while fetching
assert len(resp.data["errors"]) == 1
assert resp.data["errors"][0] == {
"detail": "Could not fetch details from Super Awesome App"
}
# Disables the SentryApp
assert (
resp.data["triggers"][0]["actions"][0]["sentryAppInstallationUuid"]
== self.installation.uuid
)
assert resp.data["triggers"][0]["actions"][0]["disabled"] is True
def test_with_snooze_rule(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
rule_snooze = self.snooze_rule(
user_id=self.user.id, owner_id=self.user.id, alert_rule=self.alert_rule
)
with self.feature("organizations:incidents"):
response = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert response.data["snooze"]
assert response.data["snoozeCreatedBy"] == "You"
rule_snooze.owner_id = None
rule_snooze.save()
response = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert response.data["snooze"]
assert "snoozeCreatedBy" not in response.data
def test_with_snooze_rule_everyone(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
user2 = self.create_user("user2@example.com")
self.snooze_rule(owner_id=user2.id, alert_rule=self.alert_rule)
with self.feature("organizations:incidents"):
response = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert response.data["snooze"]
assert response.data["snoozeCreatedBy"] == user2.get_display_name()
@patch("sentry.incidents.serializers.alert_rule.are_any_projects_error_upsampled")
def test_get_shows_count_when_stored_as_upsampled_count(
self, mock_are_any_projects_error_upsampled
) -> None:
"""Test GET returns count() to user even when stored as upsampled_count() internally"""
mock_are_any_projects_error_upsampled.return_value = True
# Set up user membership FIRST before accessing self.alert_rule
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
# Now access and modify the alert rule to have upsampled_count() internally
# (simulating what would happen if it was created with count() on upsampled project)
self.alert_rule.snuba_query.aggregate = "upsampled_count()"
self.alert_rule.snuba_query.save()
with self.feature("organizations:incidents"):
resp = self.get_success_response(self.organization.slug, self.alert_rule.id)
assert (
resp.data["aggregate"] == "count()"
), "GET should return count() to user, hiding internal upsampled_count() storage"
| AlertRuleDetailsGetEndpointTest |
python | huggingface__transformers | src/transformers/models/rag/modeling_rag.py | {
"start": 15762,
"end": 22328
} | class ____(PreTrainedModel):
config: RagConfig
base_model_prefix = "rag"
_supports_flash_attn = True
_supports_sdpa = True
@classmethod
def from_pretrained_question_encoder_generator(
cls,
question_encoder_pretrained_model_name_or_path: Optional[str] = None,
generator_pretrained_model_name_or_path: Optional[str] = None,
retriever: Optional[RagRetriever] = None,
**kwargs,
) -> PreTrainedModel:
r"""
Instantiates an question encoder and a generator from one or two base classes of the library from pretrained
model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the question encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the generator. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
retriever ([`RagRetriever`], *optional*):
The retriever to use.
kwwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the question_encoder configuration, use the prefix *question_encoder_* for each
configuration parameter.
- To update the generator configuration, use the prefix *generator_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import RagModel
>>> # initialize a RAG from two pretrained models.
>>> model = RagModel.from_pretrained_question_encoder_generator(
... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./rag")
>>> # load fine-tuned model
>>> model = RagModel.from_pretrained("./rag")
```"""
kwargs_question_encoder = {
argument[len("question_encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("question_encoder_")
}
kwargs_generator = {
argument[len("generator_") :]: value
for argument, value in kwargs.items()
if argument.startswith("generator_")
}
# remove question_encoder, generator kwargs from kwargs
for key in kwargs_question_encoder:
del kwargs["question_encoder_" + key]
for key in kwargs_generator:
del kwargs["generator_" + key]
# Load and initialize the question_encoder and generator
# The distinction between question_encoder and generator at the model level is made
# by the value of the flag `is_generator` that we need to set correctly.
question_encoder = kwargs_question_encoder.pop("model", None)
if question_encoder is None:
assert question_encoder_pretrained_model_name_or_path is not None, (
"If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
" be defined"
)
from ..auto.modeling_auto import AutoModel
if "config" not in kwargs_question_encoder:
from ..auto.configuration_auto import AutoConfig
question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(
question_encoder_pretrained_model_name_or_path,
**kwargs_question_encoder,
return_unused_kwargs=True,
)
kwargs_question_encoder["config"] = question_encoder_config
question_encoder = AutoModel.from_pretrained(
question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder
)
generator = kwargs_generator.pop("model", None)
if generator is None:
assert generator_pretrained_model_name_or_path is not None, (
"If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
" to be defined"
)
from ..auto.modeling_auto import AutoModelForSeq2SeqLM
if "config" not in kwargs_generator:
from ..auto.configuration_auto import AutoConfig
generator_config, kwargs_generator = AutoConfig.from_pretrained(
generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True
)
kwargs_generator["config"] = generator_config
generator = AutoModelForSeq2SeqLM.from_pretrained(
generator_pretrained_model_name_or_path, **kwargs_generator
)
# instantiate config with corresponding kwargs
config = kwargs.get("config")
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
@auto_docstring
| RagPreTrainedModel |
python | walkccc__LeetCode | solutions/2179. Count Good Triplets in an Array/2179.py | {
"start": 421,
"end": 1374
} | class ____:
def goodTriplets(self, nums1: list[int], nums2: list[int]) -> int:
n = len(nums1)
numToIndex = {num: i for i, num in enumerate(nums1)}
# Remap each number in `nums2` to the according index in `nums1` as `arr`.
# So the problem is to find the number of increasing tripets in `arr`.
arr = [numToIndex[num] for num in nums2]
# leftSmaller[i] := the number of arr[j] < arr[i], where 0 <= j < i
leftSmaller = [0] * n
# rightLarger[i] := the number of arr[j] > arr[i], where i < j < n
rightLarger = [0] * n
tree1 = FenwickTree(n) # Calculates `leftSmaller`.
tree2 = FenwickTree(n) # Calculates `rightLarger`.
for i, a in enumerate(arr):
leftSmaller[i] = tree1.get(a)
tree1.add(a + 1, 1)
for i, a in reversed(list(enumerate(arr))):
rightLarger[i] = tree2.get(n) - tree2.get(a)
tree2.add(a + 1, 1)
return sum(a * b for a, b in zip(leftSmaller, rightLarger))
| Solution |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 1662,
"end": 1746
} | class ____:
my_var: int | str
@my_decorator
@dataclasses.dataclass
| CustomDataClass3 |
python | keras-team__keras | keras/src/layers/preprocessing/feature_space.py | {
"start": 2498,
"end": 30082
} | class ____(Layer):
"""One-stop utility for preprocessing and encoding structured data.
Arguments:
feature_names: Dict mapping the names of your features to their
type specification, e.g. `{"my_feature": "integer_categorical"}`
or `{"my_feature": FeatureSpace.integer_categorical()}`.
For a complete list of all supported types, see
"Available feature types" paragraph below.
output_mode: One of `"concat"` or `"dict"`. In concat mode, all
features get concatenated together into a single vector.
In dict mode, the FeatureSpace returns a dict of individually
encoded features (with the same keys as the input dict keys).
crosses: List of features to be crossed together, e.g.
`crosses=[("feature_1", "feature_2")]`. The features will be
"crossed" by hashing their combined value into
a fixed-length vector.
crossing_dim: Default vector size for hashing crossed features.
Defaults to `32`.
hashing_dim: Default vector size for hashing features of type
`"integer_hashed"` and `"string_hashed"`. Defaults to `32`.
num_discretization_bins: Default number of bins to be used for
discretizing features of type `"float_discretized"`.
Defaults to `32`.
**Available feature types:**
Note that all features can be referred to by their string name,
e.g. `"integer_categorical"`. When using the string name, the default
argument values are used.
```python
# Plain float values.
FeatureSpace.float(name=None)
# Float values to be preprocessed via featurewise standardization
# (i.e. via a `keras.layers.Normalization` layer).
FeatureSpace.float_normalized(name=None)
# Float values to be preprocessed via linear rescaling
# (i.e. via a `keras.layers.Rescaling` layer).
FeatureSpace.float_rescaled(scale=1., offset=0., name=None)
# Float values to be discretized. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.float_discretized(
num_bins, bin_boundaries=None, output_mode="one_hot", name=None)
# Integer values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.integer_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# String values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.string_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# Integer values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.integer_hashed(num_bins, output_mode="one_hot", name=None)
# String values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.string_hashed(num_bins, output_mode="one_hot", name=None)
```
Examples:
**Basic usage with a dict of input data:**
```python
raw_data = {
"float_values": [0.0, 0.1, 0.2, 0.3],
"string_values": ["zero", "one", "two", "three"],
"int_values": [0, 1, 2, 3],
}
dataset = tf.data.Dataset.from_tensor_slices(raw_data)
feature_space = FeatureSpace(
features={
"float_values": "float_normalized",
"string_values": "string_categorical",
"int_values": "integer_categorical",
},
crosses=[("string_values", "int_values")],
output_mode="concat",
)
# Before you start using the FeatureSpace,
# you must `adapt()` it on some data.
feature_space.adapt(dataset)
# You can call the FeatureSpace on a dict of data (batched or unbatched).
output_vector = feature_space(raw_data)
```
**Basic usage with `tf.data`:**
```python
# Unlabeled data
preprocessed_ds = unlabeled_dataset.map(feature_space)
# Labeled data
preprocessed_ds = labeled_dataset.map(lambda x, y: (feature_space(x), y))
```
**Basic usage with the Keras Functional API:**
```python
# Retrieve a dict Keras Input objects
inputs = feature_space.get_inputs()
# Retrieve the corresponding encoded Keras tensors
encoded_features = feature_space.get_encoded_features()
# Build a Functional model
outputs = keras.layers.Dense(1, activation="sigmoid")(encoded_features)
model = keras.Model(inputs, outputs)
```
**Customizing each feature or feature cross:**
```python
feature_space = FeatureSpace(
features={
"float_values": FeatureSpace.float_normalized(),
"string_values": FeatureSpace.string_categorical(max_tokens=10),
"int_values": FeatureSpace.integer_categorical(max_tokens=10),
},
crosses=[
FeatureSpace.cross(("string_values", "int_values"), crossing_dim=32)
],
output_mode="concat",
)
```
**Returning a dict of integer-encoded features:**
```python
feature_space = FeatureSpace(
features={
"string_values": FeatureSpace.string_categorical(output_mode="int"),
"int_values": FeatureSpace.integer_categorical(output_mode="int"),
},
crosses=[
FeatureSpace.cross(
feature_names=("string_values", "int_values"),
crossing_dim=32,
output_mode="int",
)
],
output_mode="dict",
)
```
**Specifying your own Keras preprocessing layer:**
```python
# Let's say that one of the features is a short text paragraph that
# we want to encode as a vector (one vector per paragraph) via TF-IDF.
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
# There's a Keras layer for this: TextVectorization.
custom_layer = layers.TextVectorization(output_mode="tf_idf")
# We can use FeatureSpace.feature to create a custom feature
# that will use our preprocessing layer.
feature_space = FeatureSpace(
features={
"text": FeatureSpace.feature(
preprocessor=custom_layer, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
feature_space.adapt(tf.data.Dataset.from_tensor_slices(data))
output_vector = feature_space(data)
```
**Retrieving the underlying Keras preprocessing layers:**
```python
# The preprocessing layer of each feature is available in `.preprocessors`.
preprocessing_layer = feature_space.preprocessors["feature1"]
# The crossing layer of each feature cross is available in `.crossers`.
# It's an instance of keras.layers.HashedCrossing.
crossing_layer = feature_space.crossers["feature1_X_feature2"]
```
**Saving and reloading a FeatureSpace:**
```python
feature_space.save("featurespace.keras")
reloaded_feature_space = keras.models.load_model("featurespace.keras")
```
"""
@classmethod
def cross(cls, feature_names, crossing_dim, output_mode="one_hot"):
return Cross(feature_names, crossing_dim, output_mode=output_mode)
@classmethod
def feature(cls, dtype, preprocessor, output_mode):
return Feature(dtype, preprocessor, output_mode)
@classmethod
def float(cls, name=None):
name = name or auto_name("float")
preprocessor = TFDIdentity(dtype="float32", name=f"{name}_preprocessor")
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_rescaled(cls, scale=1.0, offset=0.0, name=None):
name = name or auto_name("float_rescaled")
preprocessor = layers.Rescaling(
scale=scale, offset=offset, name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_normalized(cls, name=None):
name = name or auto_name("float_normalized")
preprocessor = layers.Normalization(
axis=-1, name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_discretized(
cls, num_bins, bin_boundaries=None, output_mode="one_hot", name=None
):
name = name or auto_name("float_discretized")
preprocessor = layers.Discretization(
num_bins=num_bins,
bin_boundaries=bin_boundaries,
name=f"{name}_preprocessor",
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def integer_categorical(
cls,
max_tokens=None,
num_oov_indices=1,
output_mode="one_hot",
name=None,
):
name = name or auto_name("integer_categorical")
preprocessor = layers.IntegerLookup(
name=f"{name}_preprocessor",
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
)
return Feature(
dtype="int32", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def string_categorical(
cls,
max_tokens=None,
num_oov_indices=1,
output_mode="one_hot",
name=None,
):
name = name or auto_name("string_categorical")
preprocessor = layers.StringLookup(
name=f"{name}_preprocessor",
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
)
return Feature(
dtype="string", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def string_hashed(cls, num_bins, output_mode="one_hot", name=None):
name = name or auto_name("string_hashed")
preprocessor = layers.Hashing(
name=f"{name}_preprocessor", num_bins=num_bins
)
return Feature(
dtype="string", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def integer_hashed(cls, num_bins, output_mode="one_hot", name=None):
name = name or auto_name("integer_hashed")
preprocessor = layers.Hashing(
name=f"{name}_preprocessor", num_bins=num_bins
)
return Feature(
dtype="int32", preprocessor=preprocessor, output_mode=output_mode
)
def __init__(
self,
features,
output_mode="concat",
crosses=None,
crossing_dim=32,
hashing_dim=32,
num_discretization_bins=32,
name=None,
):
super().__init__(name=name)
if not features:
raise ValueError("The `features` argument cannot be None or empty.")
self.crossing_dim = crossing_dim
self.hashing_dim = hashing_dim
self.num_discretization_bins = num_discretization_bins
self.features = {
name: self._standardize_feature(name, value)
for name, value in features.items()
}
self.crosses = []
if crosses:
feature_set = set(features.keys())
for cross in crosses:
if isinstance(cross, dict):
cross = serialization_lib.deserialize_keras_object(cross)
if isinstance(cross, Cross):
self.crosses.append(cross)
else:
if not crossing_dim:
raise ValueError(
"When specifying `crosses`, the argument "
"`crossing_dim` "
"(dimensionality of the crossing space) "
"should be specified as well."
)
for key in cross:
if key not in feature_set:
raise ValueError(
"All features referenced "
"in the `crosses` argument "
"should be present in the `features` dict. "
f"Received unknown features: {cross}"
)
self.crosses.append(Cross(cross, crossing_dim=crossing_dim))
self.crosses_by_name = {cross.name: cross for cross in self.crosses}
if output_mode not in {"dict", "concat"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'dict', 'concat'}. "
f"Received: output_mode={output_mode}"
)
self.output_mode = output_mode
self.inputs = {
name: self._feature_to_input(name, value)
for name, value in self.features.items()
}
self.preprocessors = {
name: value.preprocessor for name, value in self.features.items()
}
self.encoded_features = None
self.crossers = {
cross.name: self._cross_to_crosser(cross) for cross in self.crosses
}
self.one_hot_encoders = {}
self._is_adapted = False
self.concat = None
self._preprocessed_features_names = None
self._crossed_features_names = None
self._sublayers_built = False
def _feature_to_input(self, name, feature):
return layers.Input(shape=(1,), dtype=feature.dtype, name=name)
def _standardize_feature(self, name, feature):
if isinstance(feature, Feature):
return feature
if isinstance(feature, dict):
return serialization_lib.deserialize_keras_object(feature)
if feature == "float":
return self.float(name=name)
elif feature == "float_normalized":
return self.float_normalized(name=name)
elif feature == "float_rescaled":
return self.float_rescaled(name=name)
elif feature == "float_discretized":
return self.float_discretized(
name=name, num_bins=self.num_discretization_bins
)
elif feature == "integer_categorical":
return self.integer_categorical(name=name)
elif feature == "string_categorical":
return self.string_categorical(name=name)
elif feature == "integer_hashed":
return self.integer_hashed(self.hashing_dim, name=name)
elif feature == "string_hashed":
return self.string_hashed(self.hashing_dim, name=name)
else:
raise ValueError(f"Invalid feature type: {feature}")
def _cross_to_crosser(self, cross):
return layers.HashedCrossing(cross.crossing_dim, name=cross.name)
def _list_adaptable_preprocessors(self):
adaptable_preprocessors = []
for name in self.features.keys():
preprocessor = self.preprocessors[name]
# Special case: a Normalization layer with preset mean/variance.
# Not adaptable.
if isinstance(preprocessor, layers.Normalization):
if preprocessor.input_mean is not None:
continue
# Special case: a TextVectorization layer with provided vocabulary.
elif isinstance(preprocessor, layers.TextVectorization):
if preprocessor._has_input_vocabulary:
continue
if hasattr(preprocessor, "adapt"):
adaptable_preprocessors.append(name)
return adaptable_preprocessors
    def adapt(self, dataset):
        """Fit all adaptable preprocessing layers to `dataset`.

        Args:
            dataset: A `tf.data.Dataset` yielding dicts that map feature
                names to raw feature values (batched or unbatched).

        Raises:
            ValueError: If `dataset` is not a `tf.data.Dataset`.
        """
        if not isinstance(dataset, tf.data.Dataset):
            raise ValueError(
                "`adapt()` can only be called on a tf.data.Dataset. "
                f"Received instead: {dataset} (of type {type(dataset)})"
            )
        for name in self._list_adaptable_preprocessors():
            # Call adapt() on each individual adaptable layer.
            # TODO: consider rewriting this to instead iterate on the
            # dataset once, split each batch into individual features,
            # and call the layer's `_adapt_function` on each batch
            # to simulate the behavior of adapt() in a more performant fashion.
            feature_dataset = dataset.map(lambda x: x[name])
            preprocessor = self.preprocessors[name]
            # TODO: consider adding an adapt progress bar.
            # Sample 1 element to check the rank
            x = next(iter(feature_dataset))
            if len(x.shape) == 0:
                # The dataset yields unbatched scalars; batch it.
                feature_dataset = feature_dataset.batch(32)
            if len(x.shape) in {0, 1}:
                # If the rank is 1, add a dimension
                # so we can reduce on axis=-1.
                # Note: if rank was previously 0, it is now 1.
                feature_dataset = feature_dataset.map(
                    lambda x: tf.expand_dims(x, -1)
                )
            preprocessor.adapt(feature_dataset)
        self._is_adapted = True
        # Building the encoded features finalizes all sublayers.
        self.get_encoded_features()  # Finish building the layer
        self.built = True
        self._sublayers_built = True
def get_inputs(self):
self._check_if_built()
return self.inputs
def get_encoded_features(self):
self._check_if_adapted()
if self.encoded_features is None:
preprocessed_features = self._preprocess_features(self.inputs)
crossed_features = self._cross_features(preprocessed_features)
merged_features = self._merge_features(
preprocessed_features, crossed_features
)
self.encoded_features = merged_features
return self.encoded_features
def _preprocess_features(self, features):
return {
name: self.preprocessors[name](features[name])
for name in features.keys()
}
def _cross_features(self, features):
all_outputs = {}
for cross in self.crosses:
inputs = [features[name] for name in cross.feature_names]
outputs = self.crossers[cross.name](inputs)
all_outputs[cross.name] = outputs
return all_outputs
    def _merge_features(self, preprocessed_features, crossed_features):
        """Encode and merge preprocessed and crossed features.

        On the first call this also creates the one-hot encoder layers
        and (for `output_mode="concat"`) the concat layer; subsequent
        calls take a fast path that reuses those sublayers.

        Returns:
            A dict of encoded tensors if `self.output_mode` is `"dict"`,
            otherwise a single concatenated tensor.
        """
        if not self._preprocessed_features_names:
            # Fix a deterministic (sorted) feature ordering on first use.
            self._preprocessed_features_names = sorted(
                preprocessed_features.keys()
            )
            self._crossed_features_names = sorted(crossed_features.keys())
        all_names = (
            self._preprocessed_features_names + self._crossed_features_names
        )
        all_features = [
            preprocessed_features[name]
            for name in self._preprocessed_features_names
        ] + [crossed_features[name] for name in self._crossed_features_names]
        if self.output_mode == "dict":
            output_dict = {}
        else:
            features_to_concat = []
        if self._sublayers_built:
            # Fast mode.
            for name, feature in zip(all_names, all_features):
                encoder = self.one_hot_encoders.get(name, None)
                if encoder:
                    feature = encoder(feature)
                if self.output_mode == "dict":
                    output_dict[name] = feature
                else:
                    features_to_concat.append(feature)
            if self.output_mode == "dict":
                return output_dict
            else:
                return self.concat(features_to_concat)
        # If the object isn't built,
        # we create the encoder and concat layers below
        all_specs = [
            self.features[name] for name in self._preprocessed_features_names
        ] + [
            self.crosses_by_name[name] for name in self._crossed_features_names
        ]
        for name, feature, spec in zip(all_names, all_features, all_specs):
            if tree.is_nested(feature):
                dtype = tree.flatten(feature)[0].dtype
            else:
                dtype = feature.dtype
            dtype = backend.standardize_dtype(dtype)
            if spec.output_mode == "one_hot":
                preprocessor = self.preprocessors.get(
                    name
                ) or self.crossers.get(name)
                cardinality = None
                if not dtype.startswith("int"):
                    raise ValueError(
                        f"Feature '{name}' has `output_mode='one_hot'`. "
                        "Thus its preprocessor should return an integer dtype. "
                        f"Instead it returns a {dtype} dtype."
                    )
                if isinstance(
                    preprocessor, (layers.IntegerLookup, layers.StringLookup)
                ):
                    cardinality = preprocessor.vocabulary_size()
                elif isinstance(preprocessor, layers.CategoryEncoding):
                    cardinality = preprocessor.num_tokens
                elif isinstance(preprocessor, layers.Discretization):
                    cardinality = preprocessor.num_bins
                elif isinstance(
                    preprocessor, (layers.HashedCrossing, layers.Hashing)
                ):
                    cardinality = preprocessor.num_bins
                else:
                    raise ValueError(
                        f"Feature '{name}' has `output_mode='one_hot'`. "
                        "However it isn't a standard feature and the "
                        "dimensionality of its output space is not known, "
                        "thus it cannot be one-hot encoded. "
                        "Try using `output_mode='int'`."
                    )
                if cardinality is not None:
                    encoder = layers.CategoryEncoding(
                        num_tokens=cardinality, output_mode="multi_hot"
                    )
                    self.one_hot_encoders[name] = encoder
                    feature = encoder(feature)
            if self.output_mode == "concat":
                dtype = feature.dtype
                if dtype.startswith("int") or dtype == "string":
                    raise ValueError(
                        f"Cannot concatenate features because feature '{name}' "
                        f"has not been encoded (it has dtype {dtype}). "
                        "Consider using `output_mode='dict'`."
                    )
                features_to_concat.append(feature)
            else:
                output_dict[name] = feature
        if self.output_mode == "concat":
            self.concat = TFDConcat(axis=-1)
            return self.concat(features_to_concat)
        else:
            return output_dict
def _check_if_adapted(self):
if not self._is_adapted:
if not self._list_adaptable_preprocessors():
self._is_adapted = True
else:
raise ValueError(
"You need to call `.adapt(dataset)` on the FeatureSpace "
"before you can start using it."
)
def _check_if_built(self):
if not self._sublayers_built:
self._check_if_adapted()
# Finishes building
self.get_encoded_features()
self._sublayers_built = True
def _convert_input(self, x):
if not isinstance(x, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if not isinstance(x, (list, tuple, int, float)):
x = backend.convert_to_numpy(x)
x = tf.convert_to_tensor(x)
return x
def __call__(self, data):
self._check_if_built()
if not isinstance(data, dict):
raise ValueError(
"A FeatureSpace can only be called with a dict. "
f"Received: data={data} (of type {type(data)}"
)
# Many preprocessing layers support all backends but many do not.
# Switch to TF to make FeatureSpace work universally.
data = {key: self._convert_input(value) for key, value in data.items()}
rebatched = False
for name, x in data.items():
if len(x.shape) == 0:
data[name] = tf.reshape(x, (1, 1))
rebatched = True
elif len(x.shape) == 1:
data[name] = tf.expand_dims(x, -1)
with backend_utils.TFGraphScope():
# This scope is to make sure that inner DataLayers
# will not convert outputs back to backend-native --
# they should be TF tensors throughout
preprocessed_data = self._preprocess_features(data)
preprocessed_data = tree.map_structure(
lambda x: self._convert_input(x), preprocessed_data
)
crossed_data = self._cross_features(preprocessed_data)
crossed_data = tree.map_structure(
lambda x: self._convert_input(x), crossed_data
)
merged_data = self._merge_features(preprocessed_data, crossed_data)
if rebatched:
if self.output_mode == "concat":
assert merged_data.shape[0] == 1
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
merged_data = backend.convert_to_numpy(merged_data)
merged_data = tf.squeeze(merged_data, axis=0)
else:
for name, x in merged_data.items():
if len(x.shape) == 2 and x.shape[0] == 1:
merged_data[name] = tf.squeeze(x, axis=0)
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
merged_data = tree.map_structure(
lambda x: backend.convert_to_tensor(x, dtype=x.dtype),
merged_data,
)
return merged_data
def get_config(self):
return {
"features": serialization_lib.serialize_keras_object(self.features),
"output_mode": self.output_mode,
"crosses": serialization_lib.serialize_keras_object(self.crosses),
"crossing_dim": self.crossing_dim,
"hashing_dim": self.hashing_dim,
"num_discretization_bins": self.num_discretization_bins,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def get_build_config(self):
return {
name: feature.preprocessor.get_build_config()
for name, feature in self.features.items()
}
def build_from_config(self, config):
for name in config.keys():
preprocessor = self.features[name].preprocessor
if not preprocessor.built:
preprocessor.build_from_config(config[name])
self._is_adapted = True
    def save(self, filepath):
        """Save the `FeatureSpace` instance to a `.keras` file.

        Args:
            filepath: Path of the `.keras` archive to write.

        You can reload it via `keras.models.load_model()`:

        ```python
        feature_space.save("featurespace.keras")
        reloaded_fs = keras.models.load_model("featurespace.keras")
        ```
        """
        saving_lib.save_model(self, filepath)
def save_own_variables(self, store):
return
def load_own_variables(self, store):
return
| FeatureSpace |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_membership_file_urls.py | {
"start": 1243,
"end": 1533
} | class ____(
GQLResult
):
page_info: PageInfoFragment = Field(alias="pageInfo")
edges: List[
ArtifactCollectionMembershipFileUrlsProjectArtifactCollectionArtifactMembershipFilesEdges
]
| ArtifactCollectionMembershipFileUrlsProjectArtifactCollectionArtifactMembershipFiles |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0116_mark_fields_as_null.py | {
"start": 150,
"end": 7982
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0115_add_addonsconfig_history"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="conf_py_file",
field=models.CharField(
blank=True,
default="",
help_text="Path from project root to <code>conf.py</code> file (ex. <code>docs/conf.py</code>). Leave blank if you want us to find it for you.",
max_length=255,
null=True,
verbose_name="Python configuration file",
),
),
migrations.AlterField(
model_name="historicalproject",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
],
default=None,
null=True,
blank=True,
help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.',
max_length=20,
verbose_name="Documentation type",
),
),
migrations.AlterField(
model_name="historicalproject",
name="enable_epub_build",
field=models.BooleanField(
blank=True,
default=False,
help_text="Create a EPUB version of your documentation with each build.",
null=True,
verbose_name="Enable EPUB build",
),
),
migrations.AlterField(
model_name="historicalproject",
name="enable_pdf_build",
field=models.BooleanField(
blank=True,
default=False,
help_text="Create a PDF version of your documentation with each build.",
null=True,
verbose_name="Enable PDF build",
),
),
migrations.AlterField(
model_name="historicalproject",
name="install_project",
field=models.BooleanField(
blank=True,
default=False,
help_text="Install your project inside a virtualenv using <code>setup.py install</code>",
null=True,
verbose_name="Install Project",
),
),
migrations.AlterField(
model_name="historicalproject",
name="path",
field=models.CharField(
blank=True,
editable=False,
help_text="The directory where <code>conf.py</code> lives",
max_length=255,
null=True,
verbose_name="Path",
),
),
migrations.AlterField(
model_name="historicalproject",
name="python_interpreter",
field=models.CharField(
blank=True,
choices=[("python", "CPython 2.x"), ("python3", "CPython 3.x")],
default="python3",
help_text="The Python interpreter used to create the virtual environment.",
max_length=20,
null=True,
verbose_name="Python Interpreter",
),
),
migrations.AlterField(
model_name="historicalproject",
name="use_system_packages",
field=models.BooleanField(
blank=True,
default=False,
help_text="Give the virtual environment access to the global site-packages dir.",
null=True,
verbose_name="Use system packages",
),
),
migrations.AlterField(
model_name="project",
name="conf_py_file",
field=models.CharField(
blank=True,
default="",
help_text="Path from project root to <code>conf.py</code> file (ex. <code>docs/conf.py</code>). Leave blank if you want us to find it for you.",
max_length=255,
null=True,
verbose_name="Python configuration file",
),
),
migrations.AlterField(
model_name="project",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
],
default=None,
null=True,
blank=True,
help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.',
max_length=20,
verbose_name="Documentation type",
),
),
migrations.AlterField(
model_name="project",
name="enable_epub_build",
field=models.BooleanField(
blank=True,
default=False,
help_text="Create a EPUB version of your documentation with each build.",
null=True,
verbose_name="Enable EPUB build",
),
),
migrations.AlterField(
model_name="project",
name="enable_pdf_build",
field=models.BooleanField(
blank=True,
default=False,
help_text="Create a PDF version of your documentation with each build.",
null=True,
verbose_name="Enable PDF build",
),
),
migrations.AlterField(
model_name="project",
name="install_project",
field=models.BooleanField(
blank=True,
default=False,
help_text="Install your project inside a virtualenv using <code>setup.py install</code>",
null=True,
verbose_name="Install Project",
),
),
migrations.AlterField(
model_name="project",
name="path",
field=models.CharField(
blank=True,
editable=False,
help_text="The directory where <code>conf.py</code> lives",
max_length=255,
null=True,
verbose_name="Path",
),
),
migrations.AlterField(
model_name="project",
name="python_interpreter",
field=models.CharField(
blank=True,
choices=[("python", "CPython 2.x"), ("python3", "CPython 3.x")],
default="python3",
help_text="The Python interpreter used to create the virtual environment.",
max_length=20,
null=True,
verbose_name="Python Interpreter",
),
),
migrations.AlterField(
model_name="project",
name="use_system_packages",
field=models.BooleanField(
blank=True,
default=False,
help_text="Give the virtual environment access to the global site-packages dir.",
null=True,
verbose_name="Use system packages",
),
),
]
| Migration |
python | kamyu104__LeetCode-Solutions | Python/sum-of-all-subset-xor-totals.py | {
"start": 29,
"end": 506
} | class ____(object):
def subsetXORSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# given there are k (k >= 1) nums of which ith bit is 1,
# the bit contributes to sum is:
# (nCr(k, 1) + nCr(k, 3) + ...) * (nCr(n - k, 0) + nCr(n - k, 1) + ...) * 2^i
# = 2^(k-1) * 2^(n-k) = 2^(n-1) * 2^i
result = 0
for x in nums:
result |= x
return result * 2**(len(nums)-1)
| Solution |
python | PyCQA__pylint | doc/data/messages/n/not-async-context-manager/bad.py | {
"start": 0,
"end": 200
} | class ____:
def __enter__(self):
pass
def __exit__(self, *exc):
pass
async def foo():
async with ContextManager(): # [not-async-context-manager]
pass
| ContextManager |
python | huggingface__transformers | src/transformers/models/blenderbot/configuration_blenderbot.py | {
"start": 810,
"end": 7551
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate an
Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Blenderbot
[facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`BlenderbotModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 128):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by diving by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 2):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import BlenderbotConfig, BlenderbotModel
>>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
>>> configuration = BlenderbotConfig()
>>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
>>> model = BlenderbotModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "blenderbot"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=8008,
max_position_embeddings=128,
encoder_layers=2,
encoder_ffn_dim=10240,
encoder_attention_heads=32,
decoder_layers=24,
decoder_ffn_dim=10240,
decoder_attention_heads=32,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=2560,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=1,
scale_embedding=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
encoder_no_repeat_ngram_size=3,
forced_eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
__all__ = ["BlenderbotConfig"]
| BlenderbotConfig |
python | doocs__leetcode | solution/0700-0799/0731.My Calendar II/Solution2.py | {
"start": 0,
"end": 215
} | class ____:
def __init__(self, l: int, r: int):
self.left = None
self.right = None
self.l = l
self.r = r
self.mid = (l + r) >> 1
self.v = 0
self.add = 0
| Node |
python | vyperlang__vyper | vyper/venom/passes/literals_codesize.py | {
"start": 334,
"end": 1985
} | class ____(IRPass):
def run_pass(self):
for bb in self.function.get_basic_blocks():
self._process_bb(bb)
def _process_bb(self, bb):
for inst in bb.instructions:
if inst.opcode != "assign":
continue
(op,) = inst.operands
if not isinstance(op, IRLiteral):
continue
val = op.value % (2**256)
# calculate amount of bits saved by not optimization
not_benefit = ((len(hex(val)) // 2 - len(hex(evm_not(val))) // 2) - NOT_THRESHOLD) * 8
# calculate amount of bits saved by shl optimization
binz = bin(val)[2:]
ix = len(binz) - binz.rfind("1")
shl_benefit = ix - SHL_THRESHOLD * 8
if not_benefit <= 0 and shl_benefit <= 0:
# no optimization can be done here
continue
if not_benefit >= shl_benefit:
assert not_benefit > 0 # implied by previous conditions
# transform things like 0xffff...01 to (not 0xfe)
inst.opcode = "not"
op.value = evm_not(val)
continue
else:
assert shl_benefit > 0 # implied by previous conditions
# transform things like 0x123400....000 to 0x1234 << ...
ix -= 1
# sanity check
assert (val >> ix) << ix == val, val
assert (val >> ix) & 1 == 1, val
inst.opcode = "shl"
inst.operands = [IRLiteral(val >> ix), IRLiteral(ix)]
continue
| ReduceLiteralsCodesize |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_astype.py | {
"start": 30972,
"end": 31099
} | class ____(pd.core.arrays.IntegerArray):
# GH 42501
def copy(self):
raise NotImplementedError
| IntegerArrayNoCopy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1261079,
"end": 1283593
} | class ____(
sgqlc.types.Type,
Node,
Actor,
PackageOwner,
ProjectOwner,
ProjectV2Owner,
ProjectV2Recent,
RepositoryDiscussionAuthor,
RepositoryDiscussionCommentAuthor,
RepositoryOwner,
UniformResourceLocatable,
MemberStatusable,
ProfileOwner,
Sponsorable,
AnnouncementBanner,
):
"""An account on GitHub, with one or more owners, that has
repositories, members and teams.
"""
__schema__ = github_schema
__field_names__ = (
"audit_log",
"created_at",
"database_id",
"description",
"description_html",
"domains",
"enterprise_owners",
"interaction_ability",
"ip_allow_list_enabled_setting",
"ip_allow_list_entries",
"ip_allow_list_for_installed_apps_enabled_setting",
"is_verified",
"mannequins",
"members_can_fork_private_repositories",
"members_with_role",
"new_team_resource_path",
"new_team_url",
"notification_delivery_restriction_enabled_setting",
"organization_billing_email",
"pending_members",
"repository_migrations",
"requires_two_factor_authentication",
"ruleset",
"rulesets",
"saml_identity_provider",
"team",
"teams",
"teams_resource_path",
"teams_url",
"twitter_username",
"updated_at",
"viewer_can_administer",
"viewer_can_create_repositories",
"viewer_can_create_teams",
"viewer_is_amember",
"viewer_is_following",
"web_commit_signoff_required",
)
audit_log = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationAuditEntryConnection),
graphql_name="auditLog",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("order_by", sgqlc.types.Arg(AuditLogOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "DESC"})),
)
),
)
"""Audit log entries of the organization
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `query` (`String`): The query string to filter audit entries
* `order_by` (`AuditLogOrder`): Ordering options for the returned
audit log entries. (default: `{field: CREATED_AT, direction:
DESC}`)
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The organization's public profile description."""
description_html = sgqlc.types.Field(String, graphql_name="descriptionHTML")
"""The organization's public profile description rendered to HTML."""
domains = sgqlc.types.Field(
VerifiableDomainConnection,
graphql_name="domains",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("is_verified", sgqlc.types.Arg(Boolean, graphql_name="isVerified", default=None)),
("is_approved", sgqlc.types.Arg(Boolean, graphql_name="isApproved", default=None)),
(
"order_by",
sgqlc.types.Arg(VerifiableDomainOrder, graphql_name="orderBy", default={"field": "DOMAIN", "direction": "ASC"}),
),
)
),
)
"""A list of domains owned by the organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `is_verified` (`Boolean`): Filter by if the domain is verified.
(default: `null`)
* `is_approved` (`Boolean`): Filter by if the domain is approved.
(default: `null`)
* `order_by` (`VerifiableDomainOrder`): Ordering options for
verifiable domains returned. (default: `{field: DOMAIN,
direction: ASC}`)
"""
enterprise_owners = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationEnterpriseOwnerConnection),
graphql_name="enterpriseOwners",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("organization_role", sgqlc.types.Arg(RoleInOrganization, graphql_name="organizationRole", default=None)),
(
"order_by",
sgqlc.types.Arg(OrgEnterpriseOwnerOrder, graphql_name="orderBy", default={"field": "LOGIN", "direction": "ASC"}),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of owners of the organization's enterprise account.
Arguments:
* `query` (`String`): The search string to look for.
* `organization_role` (`RoleInOrganization`): The organization
role to filter by.
* `order_by` (`OrgEnterpriseOwnerOrder`): Ordering options for
enterprise owners returned from the connection. (default:
`{field: LOGIN, direction: ASC}`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
interaction_ability = sgqlc.types.Field(RepositoryInteractionAbility, graphql_name="interactionAbility")
"""The interaction ability settings for this organization."""
ip_allow_list_enabled_setting = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListEnabledSettingValue), graphql_name="ipAllowListEnabledSetting"
)
"""The setting value for whether the organization has an IP allow
list enabled.
"""
ip_allow_list_entries = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListEntryConnection),
graphql_name="ipAllowListEntries",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
IpAllowListEntryOrder, graphql_name="orderBy", default={"field": "ALLOW_LIST_VALUE", "direction": "ASC"}
),
),
)
),
)
"""The IP addresses that are allowed to access resources owned by the
organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`IpAllowListEntryOrder`): Ordering options for IP
allow list entries returned. (default: `{field:
ALLOW_LIST_VALUE, direction: ASC}`)
"""
ip_allow_list_for_installed_apps_enabled_setting = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListForInstalledAppsEnabledSettingValue), graphql_name="ipAllowListForInstalledAppsEnabledSetting"
)
"""The setting value for whether the organization has IP allow list
configuration for installed GitHub Apps enabled.
"""
is_verified = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isVerified")
"""Whether the organization has verified its profile email and
website.
"""
mannequins = sgqlc.types.Field(
sgqlc.types.non_null(MannequinConnection),
graphql_name="mannequins",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(MannequinOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"})),
)
),
)
"""A list of all mannequins for this organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`MannequinOrder`): Ordering options for mannequins
returned from the connection. (default: `{field: CREATED_AT,
direction: ASC}`)
"""
members_can_fork_private_repositories = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="membersCanForkPrivateRepositories"
)
"""Members can fork private repositories in this organization"""
members_with_role = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationMemberConnection),
graphql_name="membersWithRole",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users who are members of this organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
new_team_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="newTeamResourcePath")
"""The HTTP path creating a new team"""
new_team_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="newTeamUrl")
"""The HTTP URL creating a new team"""
notification_delivery_restriction_enabled_setting = sgqlc.types.Field(
sgqlc.types.non_null(NotificationRestrictionSettingValue), graphql_name="notificationDeliveryRestrictionEnabledSetting"
)
"""Indicates if email notification delivery for this organization is
restricted to verified or approved domains.
"""
organization_billing_email = sgqlc.types.Field(String, graphql_name="organizationBillingEmail")
"""The billing email for the organization."""
pending_members = sgqlc.types.Field(
sgqlc.types.non_null(UserConnection),
graphql_name="pendingMembers",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users who have been invited to join this organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
repository_migrations = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryMigrationConnection),
graphql_name="repositoryMigrations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("state", sgqlc.types.Arg(MigrationState, graphql_name="state", default=None)),
("repository_name", sgqlc.types.Arg(String, graphql_name="repositoryName", default=None)),
(
"order_by",
sgqlc.types.Arg(RepositoryMigrationOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"}),
),
)
),
)
"""A list of all repository migrations for this organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `state` (`MigrationState`): Filter repository migrations by
state.
* `repository_name` (`String`): Filter repository migrations by
repository name.
* `order_by` (`RepositoryMigrationOrder`): Ordering options for
repository migrations returned. (default: `{field: CREATED_AT,
direction: ASC}`)
"""
requires_two_factor_authentication = sgqlc.types.Field(Boolean, graphql_name="requiresTwoFactorAuthentication")
"""When true the organization requires all members, billing managers,
and outside collaborators to enable two-factor authentication.
"""
ruleset = sgqlc.types.Field(
"RepositoryRuleset",
graphql_name="ruleset",
args=sgqlc.types.ArgDict((("database_id", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="databaseId", default=None)),)),
)
"""Returns a single ruleset from the current organization by ID.
Arguments:
* `database_id` (`Int!`): The ID of the ruleset to be returned.
"""
rulesets = sgqlc.types.Field(
RepositoryRulesetConnection,
graphql_name="rulesets",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("include_parents", sgqlc.types.Arg(Boolean, graphql_name="includeParents", default=False)),
)
),
)
"""A list of rulesets for this organization.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `include_parents` (`Boolean`): Return rulesets configured at
higher levels that apply to this organization (default: `false`)
"""
saml_identity_provider = sgqlc.types.Field("OrganizationIdentityProvider", graphql_name="samlIdentityProvider")
"""The Organization's SAML identity provider. Visible to (1)
organization owners, (2) organization owners' personal access
tokens (classic) with read:org or admin:org scope, (3) GitHub App
with an installation token with read or write access to members.
"""
team = sgqlc.types.Field(
"Team",
graphql_name="team",
args=sgqlc.types.ArgDict((("slug", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="slug", default=None)),)),
)
"""Find an organization's team by its slug.
Arguments:
* `slug` (`String!`): The name or slug of the team to find.
"""
teams = sgqlc.types.Field(
sgqlc.types.non_null(TeamConnection),
graphql_name="teams",
args=sgqlc.types.ArgDict(
(
("privacy", sgqlc.types.Arg(TeamPrivacy, graphql_name="privacy", default=None)),
("notification_setting", sgqlc.types.Arg(TeamNotificationSetting, graphql_name="notificationSetting", default=None)),
("role", sgqlc.types.Arg(TeamRole, graphql_name="role", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"user_logins",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="userLogins", default=None),
),
("order_by", sgqlc.types.Arg(TeamOrder, graphql_name="orderBy", default=None)),
("ldap_mapped", sgqlc.types.Arg(Boolean, graphql_name="ldapMapped", default=None)),
("root_teams_only", sgqlc.types.Arg(Boolean, graphql_name="rootTeamsOnly", default=False)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of teams in this organization.
Arguments:
* `privacy` (`TeamPrivacy`): If non-null, filters teams according
to privacy
* `notification_setting` (`TeamNotificationSetting`): If non-null,
filters teams according to notification setting
* `role` (`TeamRole`): If non-null, filters teams according to
whether the viewer is an admin or member on team
* `query` (`String`): If non-null, filters teams with query on
team name and team slug
* `user_logins` (`[String!]`): User logins to filter by
* `order_by` (`TeamOrder`): Ordering options for teams returned
from the connection
* `ldap_mapped` (`Boolean`): If true, filters teams that are
mapped to an LDAP Group (Enterprise only)
* `root_teams_only` (`Boolean`): If true, restrict to only root
teams (default: `false`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
teams_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="teamsResourcePath")
"""The HTTP path listing organization's teams"""
teams_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="teamsUrl")
"""The HTTP URL listing organization's teams"""
twitter_username = sgqlc.types.Field(String, graphql_name="twitterUsername")
"""The organization's Twitter username."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
viewer_can_administer = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanAdminister")
"""Organization is adminable by the viewer."""
viewer_can_create_repositories = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanCreateRepositories")
"""Viewer can create repositories on this organization"""
viewer_can_create_teams = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanCreateTeams")
"""Viewer can create teams on this organization."""
viewer_is_amember = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerIsAMember")
"""Viewer is an active member of this organization."""
viewer_is_following = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerIsFollowing")
"""Whether or not this Organization is followed by the viewer."""
web_commit_signoff_required = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="webCommitSignoffRequired")
"""Whether contributors are required to sign off on web-based commits
for repositories in this organization.
"""
| Organization |
python | kamyu104__LeetCode-Solutions | Python/minimum-amount-of-time-to-fill-cups.py | {
"start": 36,
"end": 276
} | class ____(object):
def fillCups(self, amount):
"""
:type amount: List[int]
:rtype: int
"""
return max(max(amount), (sum(amount)+1)//2)
# Time: O(1)
# Space: O(1)
# constructive algorithms
| Solution |
python | nedbat__coveragepy | tests/test_html.py | {
"start": 15191,
"end": 16878
} | class ____(HtmlTestHelpers, CoverageTest):
"""Tests of the HTML title support."""
    def test_default_title(self) -> None:
        """With no title configured, the report uses the default title."""
        self.create_initial_files()
        self.run_coverage()
        index = self.get_html_index_content()
        assert "<title>Coverage report</title>" in index
        assert "<h1>Coverage report:" in index
    def test_title_set_in_config_file(self) -> None:
        """A title from .coveragerc appears (HTML-escaped) in the report."""
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = Metrics & stuff!\n")
        self.run_coverage()
        index = self.get_html_index_content()
        # "&" must come out as "&amp;" in the generated HTML.
        assert "<title>Metrics &amp; stuff!</title>" in index
        assert "<h1>Metrics &amp; stuff!:" in index
    def test_non_ascii_title_set_in_config_file(self) -> None:
        """Non-ASCII characters in a configured title survive into the HTML."""
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = «ταБЬℓσ» numbers")
        self.run_coverage()
        index = self.get_html_index_content()
        assert "<title>«ταБЬℓσ» numbers" in index
        assert "<h1>«ταБЬℓσ» numbers" in index
    def test_title_set_in_args(self) -> None:
        """A title passed to html_report() overrides the config-file title."""
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = Good title\n")
        self.run_coverage(htmlargs=dict(title="«ταБЬℓσ» & stüff!"))
        index = self.get_html_index_content()
        # Non-ASCII characters are emitted as numeric character references.
        expected = (
            "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187; "
            + "&amp; st&#252;ff!</title>"
        )
        assert expected in index
        assert "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187; &amp; st&#252;ff!:" in index
| HtmlTitleTest |
python | doocs__leetcode | solution/2200-2299/2232.Minimize Result by Adding Parentheses to Expression/Solution.py | {
"start": 0,
"end": 543
} | class ____:
def minimizeResult(self, expression: str) -> str:
l, r = expression.split("+")
m, n = len(l), len(r)
mi = inf
ans = None
for i in range(m):
for j in range(n):
c = int(l[i:]) + int(r[: j + 1])
a = 1 if i == 0 else int(l[:i])
b = 1 if j == n - 1 else int(r[j + 1 :])
if (t := a * b * c) < mi:
mi = t
ans = f"{l[:i]}({l[i:]}+{r[: j + 1]}){r[j + 1:]}"
return ans
| Solution |
python | walkccc__LeetCode | solutions/544. Output Contest Matches/544-2.py | {
"start": 0,
"end": 262
} | class ____:
def findContestMatch(self, n: int) -> str:
matches = [str(i + 1) for i in range(n)]
while n > 1:
for i in range(n // 2):
matches[i] = '(' + matches[i] + ',' + matches[n - 1 - i] + ')'
n //= 2
return matches[0]
| Solution |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_format_returned.py | {
"start": 597,
"end": 662
} | class ____:
"""Format through the metaclass."""
| ThirdGoodFormat |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py | {
"start": 33343,
"end": 38594
} | class ____:
"""Comprehensive tests for the PrefixTreeActor"""
    async def test_tree_structure_multiple_insertions_actor(
        self, tree_actor: PrefixTreeActor
    ) -> None:
        """Insert three strings for two tenants and validate every node of the
        resulting radix tree: text, parent link, per-tenant last-access times,
        and outgoing edges."""
        # Add tenants and insert strings in specified order
        ray.get(tree_actor.add_tenants.remote(["tenant_1", "tenant_2"], 0))
        ray.get(tree_actor.insert.remote("helloworld", "tenant_1", 1))
        ray.get(tree_actor.insert.remote("hellothere", "tenant_2", 2))
        ray.get(tree_actor.insert.remote("hellothomas", "tenant_2", 3))
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_1") == [
            "",
            "hello",
            "world",
        ]
        # Access tree directly
        root: Node = ray.get(tree_actor.getattr.remote("root"))
        # Test tree structure - validate each node
        # Root node: touched by both tenants; its access time per tenant is the
        # tenant's most recent insertion timestamp.
        assert root.text == ""
        assert root.parent is None
        assert root.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
        assert set(root.edge_label_to_child.keys()) == {"h"}
        # Hello node: shared prefix of all three inserted strings.
        hello_node: Node = root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.parent.text == ""
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
        assert set(hello_node.edge_label_to_child.keys()) == {"w", "t"}
        # World node: unique suffix of tenant_1's "helloworld".
        world_node: Node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.parent.text == "hello"
        assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert set(world_node.edge_label_to_child.keys()) == set()
        # Th node: split point between "there" and "thomas" (tenant_2 only).
        th_node: Node = hello_node.edge_label_to_child["t"]
        assert th_node.text == "th"
        assert th_node.parent.text == "hello"
        assert th_node.tenant_to_last_access_time == {"tenant_2": 3}
        assert set(th_node.edge_label_to_child.keys()) == {"e", "o"}
        # Ere node: suffix of "hellothere" (timestamp 2).
        ere_node: Node = th_node.edge_label_to_child["e"]
        assert ere_node.text == "ere"
        assert ere_node.parent.text == "th"
        assert ere_node.tenant_to_last_access_time == {"tenant_2": 2}
        assert set(ere_node.edge_label_to_child.keys()) == set()
        # Omas node: suffix of "hellothomas" (timestamp 3).
        omas_node: Node = th_node.edge_label_to_child["o"]
        assert omas_node.text == "omas"
        assert omas_node.parent.text == "th"
        assert omas_node.tenant_to_last_access_time == {"tenant_2": 3}
        assert set(omas_node.edge_label_to_child.keys()) == set()
    async def test_multiple_evictions_maintains_lru_order_actor(
        self, tree_actor: PrefixTreeActor
    ) -> None:
        """Test multiple evictions maintain LRU order.

        Evicts repeatedly with min_remove_size=1 and checks that the
        least-recently-accessed nodes go first and char counts stay accurate.
        """
        # Add tenants and insert test data
        ray.get(tree_actor.add_tenants.remote(["tenant_1", "tenant_2"], 0))
        ray.get(tree_actor.insert.remote("helloworld", "tenant_1", 1))
        ray.get(tree_actor.insert.remote("hellothere", "tenant_2", 2))
        ray.get(tree_actor.insert.remote("hellothomas", "tenant_2", 3))
        assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
            "tenant_1": 10,
            "tenant_2": 14,
        }
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_1") == [
            "",
            "hello",
            "world",
        ]
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [
            "",
            "omas",
            "th",
            "hello",
            "ere",
        ]
        # Eviction 1 (tenant_1): min_remove_size=1. "hello" and "world" removed.
        evicted_1 = ray.get(tree_actor.evict_tenant_by_lru.remote("tenant_1", 1))
        assert evicted_1 == 10
        assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
            "tenant_1": 0,
            "tenant_2": 14,
        }
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_1") == [""]
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [
            "",
            "omas",
            "th",
            "hello",
            "ere",
        ]  # T2 unchanged
        # Eviction 2 (tenant_2): min_remove_size=1. "ere" is oldest timestamp, removed.
        evicted_2 = ray.get(tree_actor.evict_tenant_by_lru.remote("tenant_2", 1))
        assert evicted_2 == 3  # "ere" is 3 chars
        assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
            "tenant_1": 0,
            "tenant_2": 11,
        }  # 14 - 3
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [
            "",
            "omas",
            "th",
            "hello",
        ]
        # Eviction 3 (tenant_2): min_remove_size=1. "omas"(ts3), "th"(ts3), "hello"(ts3) removed.
        evicted_3 = ray.get(tree_actor.evict_tenant_by_lru.remote("tenant_2", 1))
        assert evicted_3 == 11  # 4+2+5 chars
        assert ray.get(tree_actor.getattr.remote("tenant_to_char_count")) == {
            "tenant_1": 0,
            "tenant_2": 0,
        }
        assert await get_lru_texts_from_tree_actor(tree_actor, "tenant_2") == [""]
@pytest.mark.asyncio
| TestPrefixTreeActorComprehensive |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 1364,
"end": 1482
} | class ____(vLLMChatCompletionRequest):
model_config = ConfigDict(arbitrary_types_allowed=True)
| ChatCompletionRequest |
python | kamyu104__LeetCode-Solutions | Python/check-if-there-is-a-valid-path-in-a-grid.py | {
"start": 33,
"end": 1098
} | class ____(object):
def hasValidPath(self, grid):
"""
:type grid: List[List[int]]
:rtype: bool
"""
E, S, W, N = [(0, 1), (1, 0), (0, -1), (-1, 0)]
directions = [
[W, E], [N, S],
[W, S], [S, E],
[W, N], [N, E]
]
for r, c in directions[grid[0][0]-1]:
if not (0 <= r < len(grid) and 0 <= c < len(grid[0])):
continue
pr, pc = 0, 0
while r != len(grid)-1 or c != len(grid[0])-1:
for dx, dy in directions[grid[r][c]-1]:
nr, nc = r+dx, c+dy
if (nr == pr and nc == pc) or \
not(0 <= nr < len(grid) and 0 <= nc < len(grid[0])) or \
(-dx, -dy) not in directions[grid[nr][nc]-1]:
continue
pr, pc, r, c = r, c, nr, nc
break
else:
return False
return True
return len(grid) == len(grid[0]) == 1
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-genesys/source_genesys/source.py | {
"start": 6123,
"end": 8710
} | class ____(AbstractSource):
def build_refresh_request_body(self) -> Mapping[str, Any]:
return {
"grant_type": "client_credentials",
"client_id": self.get_client_id(),
"client_secret": self.get_client_secret(),
}
    def check_connection(self, logger, config) -> Tuple[bool, Any]:
        """
        Check connection availability for the connector.

        TODO: Implement true connection checks using an endpoint that is always
        live. Currently this always reports success without contacting the API.
        """
        return True, None
    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Build every supported Genesys stream, authenticated against the
        region selected by `config["tenant_endpoint"]`."""
        # Maps the human-readable region label from the connector config to the
        # Genesys Cloud domain that serves that region.
        GENESYS_REGION_DOMAIN_MAP: Dict[str, str] = {
            "Americas (US East)": "mypurecloud.com",
            "Americas (US East 2)": "use2.us-gov-pure.cloud",
            "Americas (US West)": "usw2.pure.cloud",
            "Americas (Canada)": "cac1.pure.cloud",
            "Americas (São Paulo)": "sae1.pure.cloud",
            "EMEA (Frankfurt)": "mypurecloud.de",
            "EMEA (Dublin)": "mypurecloud.ie",
            "EMEA (London)": "euw2.pure.cloud",
            "Asia Pacific (Mumbai)": "aps1.pure.cloud",
            "Asia Pacific (Seoul)": "apne2.pure.cloud",
            "Asia Pacific (Sydney)": "mypurecloud.com.au",
        }
        # NOTE(review): an unknown tenant_endpoint makes .get() return None,
        # yielding "https://login.None" URLs instead of failing fast — confirm
        # whether upstream config validation prevents this.
        domain = GENESYS_REGION_DOMAIN_MAP.get(config["tenant_endpoint"])
        base_url = f"https://login.{domain}"
        api_base_url = f"https://api.{domain}"
        args = {
            "api_base_url": api_base_url,
            "authenticator": GenesysOAuthAuthenticator(base_url, config["client_id"], config["client_secret"]),
        }
        # response = self.get_connection_response(config)
        # response.raise_for_status()
        # args = {"authenticator": TokenAuthenticator(response.json()["access_token"])}
        return [
            RoutingOutboundEvents(**args),
            RoutingRoutingAssessments(**args),
            RoutingRoutingQueues(**args),
            TelephonyLocations(**args),
            TelephonyProvidersEdges(**args),
            TelephonyProvidersEdgesDids(**args),
            TelephonyProvidersEdgesDidpools(**args),
            TelephonyProvidersEdgesExtensions(**args),
            TelephonyProvidersEdgesLines(**args),
            TelephonyProvidersEdgesOutboundroutes(**args),
            TelephonyProvidersEdgesPhones(**args),
            TelephonyProvidersEdgesSites(**args),
            TelephonyProvidersEdgesTrunks(**args),
            TelephonyStations(**args),
            UserGroups(**args),
            UserUsers(**args),
        ]
| SourceGenesys |
python | django__django | tests/check_framework/test_security.py | {
"start": 8281,
"end": 9723
} | class ____(SimpleTestCase):
    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_no_sts_subdomains(self):
        """
        Emit W005 if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True while HSTS is
        otherwise enabled.
        """
        self.assertEqual(base.check_sts_include_subdomains(None), [base.W005])
    @override_settings(
        MIDDLEWARE=[],
        SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_no_sts_subdomains_no_middleware(self):
        """
        Don't warn if SecurityMiddleware isn't installed.
        """
        self.assertEqual(base.check_sts_include_subdomains(None), [])
    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_SSL_REDIRECT=False,
        SECURE_HSTS_SECONDS=None,
    )
    def test_no_sts_subdomains_no_seconds(self):
        """
        Don't warn if SECURE_HSTS_SECONDS isn't set (HSTS disabled entirely).
        """
        self.assertEqual(base.check_sts_include_subdomains(None), [])
    @override_settings(
        MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
        SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
        SECURE_HSTS_SECONDS=3600,
    )
    def test_with_sts_subdomains(self):
        """No warning when SECURE_HSTS_INCLUDE_SUBDOMAINS is enabled."""
        self.assertEqual(base.check_sts_include_subdomains(None), [])
| CheckStrictTransportSecuritySubdomainsTest |
python | pypa__warehouse | tests/unit/oidc/models/test_gitlab.py | {
"start": 2381,
"end": 35837
} | class ____:
    @pytest.mark.parametrize("environment", [None, "some_environment"])
    def test_lookup_fails_invalid_ci_config_ref_uri(self, environment):
        """A malformed ci_config_ref_uri fails before any DB lookup happens."""
        signed_claims = {
            "iss": "https://gitlab.com",
            "project_path": "foo/bar",
            "ci_config_ref_uri": ("gitlab.com/foo/bar//example/.yml@refs/heads/main"),
        }
        if environment:
            signed_claims["environment"] = environment
        # The `ci_config_ref_uri` is malformed, so no queries are performed.
        with pytest.raises(
            errors.InvalidPublisherError,
            match="Could not extract workflow filename from OIDC claims",
        ):
            gitlab.GitLabPublisher.lookup_by_claims(pretend.stub(), signed_claims)
    @pytest.mark.parametrize(
        ("configured_namespace", "configured_project", "project_path"),
        [
            (
                "Foo",
                "Bar",
                "foo/bar",
            ),
            (
                "foo",
                "bar",
                "Foo/Bar",
            ),
        ],
    )
    def test_lookup_succeeds_with_mixed_case_project_path(
        self, db_request, configured_namespace, configured_project, project_path
    ):
        """project_path matching is case-insensitive in both directions."""
        # Test that we find a matching publisher when the project_path claims match
        # even if the case is different.
        stored_publisher = GitLabPublisherFactory(
            namespace=configured_namespace,
            project=configured_project,
            workflow_filepath=".gitlab-ci.yml",
            environment="",
        )
        signed_claims = {
            "iss": "https://gitlab.com",
            "project_path": project_path,
            "ci_config_ref_uri": "gitlab.com/foo/bar//.gitlab-ci.yml@refs/heads/main",
            "environment": "some_environment",
        }
        publisher = gitlab.GitLabPublisher.lookup_by_claims(
            db_request.db, signed_claims
        )
        assert publisher.id == stored_publisher.id
        assert publisher.environment == stored_publisher.environment
    @pytest.mark.parametrize("environment", ["SomeEnvironment", "SOME_ENVIRONMENT"])
    def test_lookup_succeeds_with_non_lowercase_environment(
        self, db_request, environment
    ):
        """An exactly-matching capitalized environment claim finds its publisher."""
        # Test that we find a matching publisher when the environment claims match
        # If we incorrectly normalized the incoming capitalized claim, we wouldn't
        # find the matching publisher.
        stored_publisher = GitLabPublisherFactory(
            id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            namespace="foo",
            project="bar",
            workflow_filepath=".gitlab-ci.yml",
            environment=environment,
        )
        signed_claims = {
            "iss": "https://gitlab.com",
            "project_path": "foo/bar",
            "ci_config_ref_uri": ("gitlab.com/foo/bar//.gitlab-ci.yml@refs/heads/main"),
            "environment": environment,
        }
        publisher = gitlab.GitLabPublisher.lookup_by_claims(
            db_request.db, signed_claims
        )
        assert publisher.id == stored_publisher.id
        assert publisher.environment == environment
    @pytest.mark.parametrize("environment", ["SomeEnvironment", "SOME_ENVIRONMENT"])
    def test_lookup_is_case_sensitive_for_environment(self, db_request, environment):
        """Environment matching is case-sensitive: a different case is a miss."""
        # Test that we don't find a matching publisher when the environment claims don't
        # exactly match.
        # If we incorrectly normalized the incoming capitalized claim, we would match
        # a publisher that has a different environment.
        GitLabPublisherFactory(
            id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            namespace="foo",
            project="bar",
            workflow_filepath=".gitlab-ci.yml",
            # stored environment is all lowercase, doesn't match incoming claims
            environment=environment.lower(),
        )
        signed_claims = {
            "iss": "https://gitlab.com",
            "project_path": "foo/bar",
            "ci_config_ref_uri": ("gitlab.com/foo/bar//.gitlab-ci.yml@refs/heads/main"),
            "environment": environment,
        }
        with pytest.raises(errors.InvalidPublisherError) as e:
            gitlab.GitLabPublisher.lookup_by_claims(db_request.db, signed_claims)
        assert str(e.value) == "Publisher with matching claims was not found"
    @pytest.mark.parametrize("environment", ["", "some_environment"])
    @pytest.mark.parametrize(
        ("workflow_filepath_a", "workflow_filepath_b"),
        [
            ("workflows/release_pypi/ci.yml", "workflows/release-pypi/ci.yml"),
            ("workflows/release%pypi/ci.yml", "workflows/release-pypi/ci.yml"),
        ],
    )
    def test_lookup_escapes(
        self, db_request, environment, workflow_filepath_a, workflow_filepath_b
    ):
        """Filepaths containing '_' / '%' must not cross-match similar paths
        (presumably these would act as SQL LIKE wildcards if unescaped)."""
        GitLabPublisherFactory(
            id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            namespace="foo",
            project="bar",
            workflow_filepath=workflow_filepath_a,
            environment=environment,
        )
        GitLabPublisherFactory(
            id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
            namespace="foo",
            project="bar",
            workflow_filepath=workflow_filepath_b,
            environment=environment,
        )
        # Each claim set must resolve to exactly its own filepath, never the
        # lookalike sibling.
        for workflow_filepath in (workflow_filepath_a, workflow_filepath_b):
            signed_claims = {
                "iss": "https://gitlab.com",
                "project_path": "foo/bar",
                "ci_config_ref_uri": (
                    f"gitlab.com/foo/bar//{workflow_filepath}@refs/heads/main"
                ),
            }
            if environment:
                signed_claims["environment"] = environment
            assert (
                gitlab.GitLabPublisher.lookup_by_claims(
                    db_request.db, signed_claims
                ).workflow_filepath
                == workflow_filepath
            )
    def test_lookup_no_matching_publisher(self, db_request):
        """Well-formed claims with no stored publisher raise InvalidPublisherError."""
        signed_claims = {
            "iss": "https://gitlab.com",
            "project_path": "foo/bar",
            "ci_config_ref_uri": ("gitlab.com/foo/bar//.gitlab-ci.yml@refs/heads/main"),
        }
        with pytest.raises(errors.InvalidPublisherError) as e:
            gitlab.GitLabPublisher.lookup_by_claims(db_request.db, signed_claims)
        assert str(e.value) == "Publisher with matching claims was not found"
    def test_gitlab_publisher_all_known_claims(self):
        """all_known_claims() is exactly this fixed set, grouped by category."""
        assert gitlab.GitLabPublisher.all_known_claims() == {
            # required verifiable claims
            "sub",
            "project_path",
            "ci_config_ref_uri",
            # required unverifiable claims
            "ref_path",
            "sha",
            # optional verifiable claims
            "environment",
            # preverified claims
            "iss",
            "iat",
            "nbf",
            "exp",
            "aud",
            "jti",
            # unchecked claims
            "project_id",
            "namespace_id",
            "namespace_path",
            "user_id",
            "user_login",
            "user_email",
            "user_identities",
            "pipeline_id",
            "pipeline_source",
            "job_id",
            "ref",
            "ref_type",
            "ref_protected",
            "environment_protected",
            "deployment_tier",
            "environment_action",
            "runner_id",
            "runner_environment",
            "ci_config_sha",
            "project_visibility",
            "user_access_level",
            "groups_direct",
            "job_namespace_id",
            "job_namespace_path",
            "job_project_id",
            "job_project_path",
        }
    def test_gitlab_publisher_computed_properties(self):
        """str(), publisher URLs and stored_claims derive from the model fields."""
        publisher = gitlab.GitLabPublisher(
            project="fakerepo",
            namespace="fakeowner",
            workflow_filepath="subfolder/fakeworkflow.yml",
            environment="fakeenv",
            issuer_url="https://gitlab.com",
        )
        # Every required verifiable claim must be exposed as an attribute.
        for claim_name in publisher.__required_verifiable_claims__.keys():
            assert getattr(publisher, claim_name) is not None
        assert str(publisher) == "subfolder/fakeworkflow.yml"
        assert publisher.publisher_base_url == "https://gitlab.com/fakeowner/fakerepo"
        assert publisher.publisher_url() == "https://gitlab.com/fakeowner/fakerepo"
        # With a sha claim the URL points at the specific commit.
        assert (
            publisher.publisher_url({"sha": "somesha"})
            == "https://gitlab.com/fakeowner/fakerepo/commit/somesha"
        )
        assert publisher.stored_claims({"sha": "somesha", "ref_path": "someref"}) == {
            "sha": "somesha",
            "ref_path": "someref",
        }
    def test_gitlab_publisher_admin_details_with_environment(self):
        """admin_details includes an Environment row when one is configured."""
        publisher = gitlab.GitLabPublisher(
            project="fakerepo",
            namespace="fakeowner",
            workflow_filepath="subfolder/fakeworkflow.yml",
            environment="fakeenv",
            issuer_url="https://gitlab.com",
        )
        assert publisher.admin_details == [
            ("Project", "fakeowner/fakerepo"),
            ("Workflow", "subfolder/fakeworkflow.yml"),
            ("Issuer URL", "https://gitlab.com"),
            ("Environment", "fakeenv"),
        ]
    def test_gitlab_publisher_admin_details_without_environment(self):
        """admin_details omits the Environment row when none is configured."""
        publisher = gitlab.GitLabPublisher(
            project="fakerepo",
            namespace="fakeowner",
            workflow_filepath="subfolder/fakeworkflow.yml",
            environment="",
            issuer_url="https://gitlab.com",
        )
        assert publisher.admin_details == [
            ("Project", "fakeowner/fakerepo"),
            ("Workflow", "subfolder/fakeworkflow.yml"),
            ("Issuer URL", "https://gitlab.com"),
        ]
    def test_gitlab_publisher_unaccounted_claims(self, monkeypatch):
        """Unknown claims don't fail verification but are reported to Sentry,
        sorted, with the claim names as the event fingerprint."""
        scope = pretend.stub()
        sentry_sdk = pretend.stub(
            capture_message=pretend.call_recorder(lambda s: None),
            new_scope=pretend.call_recorder(
                lambda: pretend.stub(
                    __enter__=lambda *a: scope, __exit__=lambda *a: None
                )
            ),
        )
        monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
        # We don't care if these actually verify, only that they're present.
        signed_claims = {
            claim_name: "fake"
            for claim_name in gitlab.GitLabPublisher.all_known_claims()
        }
        signed_claims["fake-claim"] = "fake"
        signed_claims["another-fake-claim"] = "also-fake"
        gitlab.GitLabPublisher.check_claims_existence(signed_claims)
        assert sentry_sdk.capture_message.calls == [
            pretend.call(
                "JWT for GitLabPublisher has unaccounted claims: "
                "['another-fake-claim', 'fake-claim']"
            )
        ]
        assert scope.fingerprint == ["another-fake-claim", "fake-claim"]
    @pytest.mark.parametrize(
        "missing",
        gitlab.GitLabPublisher.__required_verifiable_claims__.keys()
        | gitlab.GitLabPublisher.__required_unverifiable_claims__,
    )
    def test_gitlab_publisher_missing_claims(self, monkeypatch, missing):
        """Dropping any single required claim raises and reports to Sentry."""
        publisher = gitlab.GitLabPublisher(
            project="fakerepo",
            namespace="fakeowner",
            workflow_filepath="subfolder/fakeworkflow.yml",
            issuer_url="https://gitlab.com",
        )
        scope = pretend.stub()
        sentry_sdk = pretend.stub(
            capture_message=pretend.call_recorder(lambda s: None),
            new_scope=pretend.call_recorder(
                lambda: pretend.stub(
                    __enter__=lambda *a: scope, __exit__=lambda *a: None
                )
            ),
        )
        monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
        signed_claims = {
            claim_name: "fake"
            for claim_name in gitlab.GitLabPublisher.all_known_claims()
        }
        # Pop the missing claim, so that it's missing.
        signed_claims.pop(missing)
        assert missing not in signed_claims
        assert publisher.__required_verifiable_claims__
        with pytest.raises(errors.InvalidPublisherError) as e:
            gitlab.GitLabPublisher.check_claims_existence(signed_claims)
        assert str(e.value) == f"Missing claim {missing!r}"
        assert sentry_sdk.capture_message.calls == [
            pretend.call(f"JWT for GitLabPublisher is missing claim: {missing}")
        ]
        assert scope.fingerprint == [missing]
    def test_gitlab_publisher_missing_optional_claims(self, monkeypatch):
        """If the publisher expects an environment but the JWT omits it,
        verify_claims fails on that optional claim without reporting to Sentry."""
        publisher = gitlab.GitLabPublisher(
            project="fakerepo",
            namespace="fakeowner",
            workflow_filepath="subfolder/fakeworkflow.yml",
            environment="some-environment",  # The optional claim that should be present
            issuer_url="https://gitlab.com",
        )
        sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
        monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
        service = pretend.stub(
            jwt_identifier_exists=pretend.call_recorder(lambda s: False)
        )
        # Build claims from the publisher itself, deliberately leaving the
        # "environment" claim out.
        signed_claims = {
            claim_name: getattr(publisher, claim_name)
            for claim_name in gitlab.GitLabPublisher.__required_verifiable_claims__
        }
        signed_claims["ref_path"] = "ref"
        signed_claims["sha"] = "sha"
        signed_claims["ci_config_ref_uri"] = publisher.ci_config_ref_uri + "@ref"
        assert publisher.__required_verifiable_claims__
        with pytest.raises(errors.InvalidPublisherError) as e:
            publisher.verify_claims(
                signed_claims=signed_claims, publisher_service=service
            )
        assert str(e.value) == "Check failed for optional claim 'environment'"
        assert sentry_sdk.capture_message.calls == []
@pytest.mark.parametrize("environment", [None, "some-environment"])
@pytest.mark.parametrize(
"missing_claims",
[set(), gitlab.GitLabPublisher.__optional_verifiable_claims__.keys()],
)
def test_gitlab_publisher_verifies(self, monkeypatch, environment, missing_claims):
publisher = gitlab.GitLabPublisher(
project="fakerepo",
namespace="fakeowner",
workflow_filepath="subfolder/fakeworkflow.yml",
environment="environment",
issuer_url="https://gitlab.com",
)
noop_check = pretend.call_recorder(lambda gt, sc, ac, **kwargs: True)
verifiable_claims = {
claim_name: noop_check
for claim_name in publisher.__required_verifiable_claims__
}
monkeypatch.setattr(
publisher, "__required_verifiable_claims__", verifiable_claims
)
optional_verifiable_claims = {
claim_name: noop_check
for claim_name in publisher.__optional_verifiable_claims__
}
monkeypatch.setattr(
publisher, "__optional_verifiable_claims__", optional_verifiable_claims
)
signed_claims = {
claim_name: "fake"
for claim_name in gitlab.GitLabPublisher.all_known_claims()
if claim_name not in missing_claims
}
assert publisher.verify_claims(
signed_claims=signed_claims, publisher_service=pretend.stub()
)
assert len(noop_check.calls) == len(verifiable_claims) + len(
optional_verifiable_claims
)
    @pytest.mark.parametrize(
        ("truth", "claim", "valid"),
        [
            # invalid: claim should never be empty or missing
            ("", None, False),
            ("foo/bar", None, False),
            ("", "", False),
            ("foo/bar", "", False),
            # valid: exact and case-insensitive matches
            ("foo/bar", "foo/bar", True),
            ("Foo/bar", "foo/bar", True),
            ("Foo/bar", "Foo/bar", True),
            ("foo/bar", "Foo/bar", True),
            ("FOO/bar", "foo/bar", True),
            ("foo/bar", "FOO/bar", True),
            ("foo/Bar", "foo/bar", True),
            ("foo/Bar", "Foo/Bar", True),
            ("foo/bar", "foo/Bar", True),
            ("foo/BAR", "foo/bar", True),
            ("foo/bar", "foo/BAR", True),
        ],
    )
    def test_check_project_path(self, truth, claim, valid):
        """The project_path claim check compares case-insensitively and rejects empty claims."""
        check = gitlab.GitLabPublisher.__required_verifiable_claims__["project_path"]
        assert check(truth, claim, pretend.stub()) == valid
@pytest.mark.parametrize(
("claim", "ref_path", "sha", "valid", "expected"),
[
# okay: workflow name, followed by a nonempty ref_path
(
"gitlab.com/foo/bar//workflows/baz.yml@refs/tags/v0.0.1",
"refs/tags/v0.0.1",
"somesha",
True,
None,
),
(
"gitlab.com/foo/bar//workflows/baz.yml@refs/pulls/6",
"refs/pulls/6",
"somesha",
True,
None,
),
(
"gitlab.com/foo/bar//workflows/baz.yml@refs/heads/main",
"refs/heads/main",
"somesha",
True,
None,
),
(
"gitlab.com/foo/bar//workflows/baz.yml@notrailingslash",
"notrailingslash",
"somesha",
True,
None,
),
# okay: workflow name, followed by a nonempty sha
(
"gitlab.com/foo/bar//workflows/baz.yml@somesha",
"someref",
"somesha",
True,
None,
),
# bad: either ref_path or sha empty
(
"gitlab.com/foo/bar//workflows/baz.yml@somesha",
None,
"somesha",
False,
"The ref_path and sha claims are empty",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@somesha",
"",
"somesha",
False,
"The ref_path and sha claims are empty",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@missing",
"someref",
None,
False,
"The ref_path and sha claims are empty",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@missing",
"someref",
"",
False,
"The ref_path and sha claims are empty",
),
# bad: both ref_path and sha are missing
(
"gitlab.com/foo/bar//workflows/baz.yml@missing",
None,
None,
False,
"The ref_path and sha claims are empty",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@missing",
"",
"",
False,
"The ref_path and sha claims are empty",
),
# bad: workflow name with various attempted impersonations on the ref_path
(
"gitlab.com/foo/bar//workflows/baz.yml@fake.yml@notrailingslash",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/baz.yml@fake.yml@notrailingslash'",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@fake.yml@refs/pulls/6",
"somesha",
"refs/pulls/6",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@refs/pulls/6', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/baz.yml@fake.yml@refs/pulls/6'",
),
# bad: missing tail or workflow name or otherwise partial
(
"gitlab.com/foo/bar//workflows/baz.yml@",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/baz.yml@'",
),
(
"gitlab.com/foo/bar//workflows/@",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/@'",
),
(
"gitlab.com/foo/bar//workflows/",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/'",
),
(
"baz.yml",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'baz.yml'",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@malicious.yml@",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/baz.yml@malicious.yml@'",
),
(
"gitlab.com/foo/bar//workflows/baz.yml@@",
"somesha",
"notrailingslash",
False,
"The ci_config_ref_uri claim does not match, expecting one of "
"['gitlab.com/foo/bar//workflows/baz.yml@notrailingslash', "
"'gitlab.com/foo/bar//workflows/baz.yml@somesha'], "
"got 'gitlab.com/foo/bar//workflows/baz.yml@@'",
),
("", None, None, False, "The ci_config_ref_uri claim is empty"),
],
)
def test_gitlab_publisher_ci_config_ref_uri(
self, claim, ref_path, sha, valid, expected
):
publisher = gitlab.GitLabPublisher(
project="bar",
namespace="foo",
workflow_filepath="workflows/baz.yml",
issuer_url="https://gitlab.com",
)
check = gitlab.GitLabPublisher.__required_verifiable_claims__[
"ci_config_ref_uri"
]
claims = {"ref_path": ref_path, "sha": sha}
if valid:
assert check(publisher.ci_config_ref_uri, claim, claims) is True
else:
with pytest.raises(errors.InvalidPublisherError) as e:
check(publisher.ci_config_ref_uri, claim, claims) is True
assert str(e.value) == expected
@pytest.mark.parametrize(
("truth", "claim", "valid"),
[
("repo:foo/bar", "repo:foo/bar:someotherstuff", True),
("repo:foo/bar", "repo:foo/bar:", True),
("repo:fOo/BaR", "repo:foo/bar", True),
("repo:foo/bar", "repo:fOo/BaR:", True),
("repo:foo/bar:someotherstuff", "repo:foo/bar", False),
("repo:foo/bar-baz", "repo:foo/bar", False),
("repo:foo/bar", "repo:foo/bar-baz", False),
],
)
def test_gitlab_publisher_sub_claim(self, truth, claim, valid):
check = gitlab.GitLabPublisher.__required_verifiable_claims__["sub"]
assert check(truth, claim, pretend.stub()) is valid
@pytest.mark.parametrize(
("truth", "claim", "valid"),
[
("", None, True),
("", "", True),
("", "some-environment", True),
("some-environment", "some-environment", True),
("some-environment", "sOmE-eNvIrOnMeNt", False),
("some-environment", None, False),
("some-environment", "some-other-environment", False),
],
)
def test_gitlab_publisher_environment_claim(self, truth, claim, valid):
check = gitlab.GitLabPublisher.__optional_verifiable_claims__["environment"]
assert check(truth, claim, pretend.stub()) is valid
def test_gitlab_publisher_duplicates_cant_be_created(self, db_request):
publisher1 = gitlab.GitLabPublisher(
project="repository_name",
namespace="repository_owner",
workflow_filepath="subfolder/worflow_filename.yml",
environment="",
issuer_url="https://gitlab.com",
)
db_request.db.add(publisher1)
db_request.db.commit()
publisher2 = gitlab.GitLabPublisher(
project="repository_name",
namespace="repository_owner",
workflow_filepath="subfolder/worflow_filename.yml",
environment="",
issuer_url="https://gitlab.com",
)
db_request.db.add(publisher2)
with pytest.raises(psycopg.errors.UniqueViolation):
db_request.db.commit()
    @pytest.mark.parametrize(
        ("project_name", "namespace", "url", "expected"),
        [
            (
                PROJECT_NAME,
                NAMESPACE,
                f"https://gitlab.com/{NAMESPACE}/{PROJECT_NAME}.git",
                True,
            ),
            (
                "Project_Name",
                NAMESPACE,
                f"https://gitlab.com/{NAMESPACE}/{PROJECT_NAME}.git",
                True,
            ),
            (
                PROJECT_NAME,
                "Project_Owner",
                f"https://gitlab.com/{NAMESPACE}/{PROJECT_NAME}.git",
                True,
            ),
            (
                PROJECT_NAME,
                NAMESPACE,
                f"https://gitlab.com/{NAMESPACE}/{PROJECT_NAME}.git/",
                True,
            ),
            (
                PROJECT_NAME,
                NAMESPACE,
                f"https://gitlab.com/{NAMESPACE}/{PROJECT_NAME}.git/issues",
                False,
            ),
            (
                PROJECT_NAME,
                NAMESPACE,
                f"https://{NAMESPACE}.gitlab.io/{PROJECT_NAME}/",
                True,
            ),
            (
                PROJECT_NAME,
                NAMESPACE,
                f"https://{NAMESPACE}.gitlab.io/{PROJECT_NAME}/subpage/",
                True,
            ),
            (
                PROJECT_NAME,
                "owner.with.dot",
                f"https://owner.with.dot.gitlab.io/{PROJECT_NAME}",
                True,
            ),
            (
                PROJECT_NAME,
                NAMESPACE,
                f"https://gitlab.com/{NAMESPACE.replace('e', 'E')}/"
                f"{PROJECT_NAME.replace('r', 'R')}/",
                True,
            ),
            (  # Unique domains are not supported
                PROJECT_NAME,
                NAMESPACE,
                f"https://{PROJECT_NAME}-123456.gitlab.io/",
                False,
            ),
            # Project name is not properly formed
            (PROJECT_NAME, NAMESPACE, f"https://{NAMESPACE}.gitlab.io/", False),
            (
                f"{NAMESPACE}.gitlab.io",
                NAMESPACE,
                f"https://{NAMESPACE}.gitlab.io",
                True,
            ),
            (
                f"{NAMESPACE}.gitlab.io",
                NAMESPACE,
                f"https://{NAMESPACE}.gitlab.io/",
                True,
            ),
            (
                f"{NAMESPACE}.gitlab.io",
                NAMESPACE,
                f"https://{NAMESPACE}.gitlab.io/subpage",
                True,
            ),
            (  # Only for user/group own pages
                "project_name.gitlab.io",
                NAMESPACE,
                f"https://{NAMESPACE}.gitlab.io/subpage",
                False,
            ),
            (
                "project",
                "group/subgroup",
                "https://group.gitlab.io/subgroup/project/",
                True,
            ),
            (
                "project",
                "group/subgroup",
                "https://group.gitlab.io/subgroup/project/about",
                True,
            ),
            # The namespace should only contain 1 element
            ("group.gitlab.io", "group/subgroup", "https://group.gitlab.io/", False),
        ],
    )
    def test_gitlab_publisher_verify_url(
        self, project_name: str, namespace: str, url: str, expected: bool
    ):
        """verify_url accepts repository URLs (case-insensitive, optional .git /
        trailing slash) and GitLab Pages URLs, rejecting everything else.
        """
        publisher = gitlab.GitLabPublisher(
            project=project_name,
            namespace=namespace,
            workflow_filepath="workflow_filename.yml",
            environment="",
            issuer_url="https://gitlab.com",
        )
        assert publisher.verify_url(url) == expected
@pytest.mark.parametrize("environment", ["", "some-env"])
def test_gitlab_publisher_attestation_identity(self, environment):
publisher = gitlab.GitLabPublisher(
project="project",
namespace="group/subgroup",
workflow_filepath="workflow_filename.yml",
environment=environment,
issuer_url="https://gitlab.com",
)
identity = publisher.attestation_identity
assert identity is not None
assert identity.repository == publisher.project_path
assert identity.workflow_filepath == publisher.workflow_filepath
if not environment:
assert identity.environment is None
else:
assert identity.environment == publisher.environment
@pytest.mark.parametrize("exists_in_db", [True, False])
def test_exists(self, db_request, exists_in_db):
publisher = gitlab.GitLabPublisher(
project="repository_name",
namespace="repository_owner",
workflow_filepath="subfolder/worflow_filename.yml",
environment="",
issuer_url="https://gitlab.com",
)
if exists_in_db:
db_request.db.add(publisher)
db_request.db.flush()
assert publisher.exists(db_request.db) == exists_in_db
def test_get_available_issuer_urls_default(self):
"""By default, there's a single known GitLab issuer URL."""
issuer_urls = gitlab.GitLabPublisher.get_available_issuer_urls()
assert issuer_urls == ["https://gitlab.com"]
def test_get_available_issuer_urls_custom(self, db_session):
"""If a custom GitLab issuer URL is configured for the org, it is included."""
org_oidc_issuer = OrganizationOIDCIssuerFactory(issuer_type="gitlab")
issuer_urls = gitlab.GitLabPublisher.get_available_issuer_urls(
org_oidc_issuer.organization
)
assert issuer_urls == ["https://gitlab.com", org_oidc_issuer.issuer_url]
def test_get_available_issuer_urls_multiple_custom(self, db_session):
"""
If multiple custom GitLab issuer URLs are configured for the org,
they are all included, and sorted alphabetically after the default.
"""
org_oidc_issuer1 = OrganizationOIDCIssuerFactory(
issuer_type="gitlab", issuer_url="https://zzz.example.com"
)
org_oidc_issuer2 = OrganizationOIDCIssuerFactory(
organization=org_oidc_issuer1.organization,
issuer_type="gitlab",
issuer_url="https://aaa.example.com",
)
issuer_urls = gitlab.GitLabPublisher.get_available_issuer_urls(
org_oidc_issuer1.organization
)
assert issuer_urls == [
"https://gitlab.com",
org_oidc_issuer2.issuer_url,
org_oidc_issuer1.issuer_url,
]
def test_get_available_issuer_urls_custom_non_gitlab(self, db_session):
"""
If a custom OIDC issuer URL of a different type is configured for the org,
it is not included.
"""
org_oidc_issuer = OrganizationOIDCIssuerFactory(issuer_type="github")
issuer_urls = gitlab.GitLabPublisher.get_available_issuer_urls(
org_oidc_issuer.organization
)
assert issuer_urls == ["https://gitlab.com"]
| TestGitLabPublisher |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.