| language (string, 1 class) | repo (string, 346 classes) | path (string, length 6-201) | class_span (dict) | source (string, length 21-2.38M) | target (string, length 1-96) |
|---|---|---|---|---|---|
python | tiangolo__fastapi | docs_src/body/tutorial001_py310.py | {
"start": 61,
"end": 271
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
app = FastAPI()
@app.post("/items/")
async def create_item(item: Item):
return item
| Item |
python | huggingface__transformers | src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py | {
"start": 1349,
"end": 2080
} | class ____(ModelOutput):
"""
Output type of [`Wav2Vec2PhonemeCTCTokenizer`], with transcription.
Args:
text (list of `str` or `str`):
Decoded logits in text form. Usually the speech transcription.
char_offsets (list of `list[dict[str, Union[int, str]]]` or `list[dict[str, Union[int, str]]]`):
Offsets of the decoded characters. In combination with sampling rate and model downsampling rate char
offsets can be used to compute time stamps for each character. Total logit score of the beam associated with
produced text.
"""
text: Union[list[str], str]
char_offsets: Union[list[ListOfDict], ListOfDict] = None
| Wav2Vec2PhonemeCTCTokenizerOutput |
python | langchain-ai__langchain | libs/core/langchain_core/messages/content.py | {
"start": 7359,
"end": 8469
} | class ____(TypedDict):
"""Text output from a LLM.
This typically represents the main text content of a message, such as the response
from a language model or the text of a user message.
!!! note "Factory function"
`create_text_block` may also be used as a factory to create a
`TextContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["text"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
text: str
"""Block text."""
annotations: NotRequired[list[Annotation]]
"""`Citation`s and other annotations."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
| TextContentBlock |
python | getsentry__sentry | src/sentry/workflow_engine/service/action/service.py | {
"start": 437,
"end": 2333
} | class ____(RpcService):
key = "workflow_engine_action"
local_mode = SiloMode.REGION
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.workflow_engine.service.action.impl import DatabaseBackedActionService
return DatabaseBackedActionService()
@regional_rpc_method(resolve=ByOrganizationId())
@abc.abstractmethod
def delete_actions_for_organization_integration(
self, *, organization_id: int, integration_id: int
) -> None:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abc.abstractmethod
def update_action_status_for_organization_integration(
self, *, organization_id: int, integration_id: int, status: int
) -> None:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abc.abstractmethod
def update_action_status_for_sentry_app_via_uuid(
self,
*,
organization_id: int,
status: int,
sentry_app_install_uuid: str,
) -> None:
pass
@regional_rpc_method(resolve=ByRegionName())
@abc.abstractmethod
def update_action_status_for_sentry_app_via_uuid__region(
self,
*,
region_name: str,
status: int,
sentry_app_install_uuid: str,
) -> None:
pass
@regional_rpc_method(resolve=ByRegionName())
@abc.abstractmethod
def update_action_status_for_sentry_app_via_sentry_app_id(
self,
*,
region_name: str,
status: int,
sentry_app_id: int,
) -> None:
pass
@regional_rpc_method(resolve=ByRegionName())
@abc.abstractmethod
def update_action_status_for_webhook_via_sentry_app_slug(
self,
*,
region_name: str,
status: int,
sentry_app_slug: str,
) -> None:
pass
action_service = ActionService.create_delegation()
| ActionService |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 100109,
"end": 101039
} | class ____(sgqlc.types.Enum):
"""Software or company that hosts social media accounts.
Enumeration Choices:
* `FACEBOOK`: Social media and networking website.
* `GENERIC`: Catch-all for social media providers that do not yet
have specific handling.
* `HOMETOWN`: Fork of Mastodon with a greater focus on local
posting.
* `INSTAGRAM`: Social media website with a focus on photo and
video sharing.
* `LINKEDIN`: Professional networking website.
* `MASTODON`: Open-source federated microblogging service.
* `REDDIT`: Social news aggregation and discussion website.
* `TWITCH`: Live-streaming service.
* `TWITTER`: Microblogging website.
* `YOUTUBE`: Online video platform.
"""
__schema__ = github_schema
__choices__ = ("FACEBOOK", "GENERIC", "HOMETOWN", "INSTAGRAM", "LINKEDIN", "MASTODON", "REDDIT", "TWITCH", "TWITTER", "YOUTUBE")
| SocialAccountProvider |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 9055,
"end": 9161
} | class ____(A):
def test(self, t: Iterable[int], bbb: str = "") -> Sequence[str]: ...
| GeneralizedArgument |
python | facebook__pyre-check | tools/upgrade/commands/configurationless.py | {
"start": 1045,
"end": 13838
} | class ____:
global_configuration: Configuration
local_configuration: Configuration
includes: Collection[str] = dataclasses.field(
default_factory=lambda: DEFAULT_INCLUDES
)
isolation_dir: Optional[str] = dataclasses.field(default=None)
@cached_property
def ignore_all_errors_prefixes(self) -> Collection[Path]:
return (
self.global_configuration.get_resolved_ignore_path_prefixes()
| self.local_configuration.get_resolved_ignore_path_prefixes()
)
@cached_property
def exclude_patterns(self) -> Collection[re.Pattern[str]]:
return (
self.global_configuration.get_exclude_as_patterns()
| self.local_configuration.get_exclude_as_patterns()
)
@cached_property
def default_global_mode(self) -> filesystem.LocalMode:
global_is_strict = (
self.global_configuration.strict
if self.global_configuration.strict is not None
else False # set default configuration strictness to UNSAFE
)
return (
filesystem.LocalMode.STRICT
if global_is_strict
else filesystem.LocalMode.UNSAFE
)
@cached_property
def default_local_mode(self) -> filesystem.LocalMode:
default_project_strictness_setting = self.local_configuration.strict
if default_project_strictness_setting is None:
return self.default_global_mode
elif default_project_strictness_setting:
return filesystem.LocalMode.STRICT
else:
return filesystem.LocalMode.UNSAFE
@cached_property
def buck_root(self) -> Path:
return self.get_buck_root()
@cached_property
def included_files(self) -> Set[Path]:
files_to_migrate = self.get_files_from_local_configuration(
self.local_configuration, self.buck_root
)
already_migrated_files = self.get_already_migrated_files(self.buck_root)
LOG.info(f"Found {len(files_to_migrate)} files to migrate")
LOG.debug(f"Files to migrate\n:{[str(file) for file in files_to_migrate]}")
LOG.info(f"Found {len(already_migrated_files)} already migrated files")
LOG.debug(
f"Files already migrated\n:{[str(file) for file in already_migrated_files]}"
)
files_to_migrate -= already_migrated_files
LOG.info(
f"Found {len(files_to_migrate)} files to migrate, not including files already migrated"
)
return files_to_migrate
def __str__(self) -> str:
local_path = str(self.local_configuration.get_path())
global_path = str(self.global_configuration.get_path())
return f"ConfigurationlessOptions(local={local_path}, global={global_path})"
def get_file_mode_to_apply(
self,
file: Path,
) -> filesystem.LocalMode:
file = file.resolve()
default_local_mode = self.default_local_mode
if any(
path_is_relative_to(file, ignore_prefix)
for ignore_prefix in self.ignore_all_errors_prefixes
):
return filesystem.LocalMode.IGNORE
else:
return default_local_mode
def _get_isolation_dir(self) -> List[str]:
if self.isolation_dir is not None:
return ["--isolation-dir", self.isolation_dir]
else:
return []
def get_buck_root(self) -> Path:
try:
root = Path(
subprocess.check_output(
["buck2", *self._get_isolation_dir(), "root", "--kind", "project"],
text=True,
cwd=self.local_configuration.root,
).strip()
)
LOG.info(f"buck2 root is {str(root)}")
except FileNotFoundError as e:
raise ValueError(
"Could not find `buck2` executable when `targets` were specified in local configuration."
) from e
return root
@staticmethod
def format_buck_targets_for_query(targets: Collection[str]) -> List[str]:
targets = [
target_expression
for target in targets
for target_expression in ["--target", target]
]
return targets
def _get_applicable_targets_from_wildcard_targets_buck_query(
self, targets: Collection[str]
) -> Collection[str]:
targets = self.format_buck_targets_for_query(targets)
buck_command = [
"buck2",
*self._get_isolation_dir(),
"bxl",
"prelude//python/sourcedb/filter.bxl:filter",
"--",
*targets,
]
LOG.info(
f"Finding targets from wildcard expression with buck2 command: `{buck_command}`"
)
raw_result = subprocess.check_output(
buck_command,
text=True,
cwd=self.local_configuration.root,
)
LOG.debug(f"Found targets:\n{raw_result}")
result = json.loads(raw_result)
return set(result)
def _get_files_to_process_from_applicable_targets(
self, applicable_targets: Collection[str], buck_root: Path
) -> Set[Path]:
formatted_targets = " ".join([f"{target!r}" for target in applicable_targets])
arguments = f"inputs( set( {formatted_targets} ) )"
with tempfile.NamedTemporaryFile(
"w+", prefix="pyre_configurationless_"
) as file:
file.write(arguments)
file.flush()
buck_command = [
"buck2",
*self._get_isolation_dir(),
"uquery",
f"@{file.name}",
]
LOG.info(
f"Finding files from wildcard target expression with buck2 command: `{buck_command}`"
)
result = subprocess.check_output(
buck_command,
text=True,
cwd=self.local_configuration.root,
).strip()
LOG.debug(f"Found files from applicable targets:\n`{result}`")
return {(buck_root / file.strip()).resolve() for file in result.split("\n")}
def _get_files_from_wildcard_targets(
self, wildcard_targets: Collection[str], buck_project_root: Path
) -> Set[Path]:
if len(wildcard_targets) == 0:
return set()
applicable_targets = (
self._get_applicable_targets_from_wildcard_targets_buck_query(
wildcard_targets
)
)
wildcard_target_files = self._get_files_to_process_from_applicable_targets(
applicable_targets, buck_project_root
)
LOG.debug(
f"Files found from wildcard target filter BXL query\n{wildcard_target_files}"
)
return wildcard_target_files
def _get_sourcedb_from_buck_classic_query(
self, targets: Collection[str]
) -> Optional[Path]:
targets = self.format_buck_targets_for_query(targets)
buck_command = [
"buck2",
*self._get_isolation_dir(),
"bxl",
"prelude//python/sourcedb/classic.bxl:build",
"--",
*targets,
]
LOG.info(f"Finding classic targets with buck2 command: `{buck_command}`")
raw_result = subprocess.check_output(
buck_command,
text=True,
cwd=self.local_configuration.root,
)
result = json.loads(raw_result)
if "db" in result:
return Path(result["db"])
return None
def _get_files_from_sourcedb(
self, sourcedb_path: Path, buck_root: Path
) -> Set[Path]:
LOG.debug(f"Loading files from sourcedb at {str(sourcedb_path)}")
with sourcedb_path.open() as file:
loaded_sourcedb = json.load(file)
if not isinstance(loaded_sourcedb, dict) or "build_map" not in loaded_sourcedb:
LOG.warning(f"Malformed sourcedb at {sourcedb_path}")
return set()
build_map = {buck_root / file for file in loaded_sourcedb["build_map"].values()}
if "dropped_targets" in loaded_sourcedb:
dropped_target_paths = {
buck_root / dropped_target["dropped_source_path"]
for dropped_target in loaded_sourcedb["dropped_targets"].values()
}
build_map |= dropped_target_paths
return {
file
for file in build_map
if file.exists()
and path_is_relative_to(file, self.local_configuration.get_path().parent)
}
def _get_files_from_classic_targets(
self, classic_targets: Collection[str], buck_project_root: Path
) -> Set[Path]:
if len(classic_targets) == 0:
return set()
sourcedb_path = self._get_sourcedb_from_buck_classic_query(classic_targets)
if sourcedb_path is None:
LOG.warning("No sourcedb path produced")
return set()
LOG.debug(f"Sourcedb path found: {sourcedb_path}")
classic_target_files = self._get_files_from_sourcedb(
sourcedb_path, buck_project_root
)
LOG.debug(
f"Files found from classic target filter BXL query\n{classic_target_files}"
)
return classic_target_files
def _get_files_to_migrate_from_targets(
self,
configuration_targets: List[str],
buck_project_root: Path,
) -> Set[Path]:
wildcard_targets: List[str] = [
target for target in configuration_targets if target.endswith("...")
]
classic_targets: List[str] = [
target for target in configuration_targets if not target.endswith("...")
]
wildcard_target_files = self._get_files_from_wildcard_targets(
wildcard_targets, buck_project_root
)
classic_target_files = self._get_files_from_classic_targets(
classic_targets, buck_project_root
)
return {
file
for file in wildcard_target_files | classic_target_files
if any(file.match(pattern) for pattern in self.includes)
}
def _get_files_to_migrate_from_source_directories(
self,
source_directories: List[Path],
) -> Set[Path]:
LOG.debug(f"Finding files with filesystem under {source_directories}")
file_system = filesystem.get_filesystem()
return {
(source_directory / file).resolve()
for source_directory in source_directories
for file in file_system.list(
str(source_directory), patterns=list(self.includes)
)
}
def get_files_from_local_configuration(
self,
local_configuration: Configuration,
buck_root: Path,
) -> Set[Path]:
if local_configuration.targets is not None:
files = self._get_files_to_migrate_from_targets(
local_configuration.targets,
buck_root,
)
elif local_configuration.source_directories is not None:
source_directories = local_configuration.source_directories
local_root = Path(local_configuration.root)
files = self._get_files_to_migrate_from_source_directories(
[local_root / directory for directory in source_directories],
)
else:
raise ValueError(
"Could not find `targets` or `source_directories` keys in local configuration"
)
LOG.debug(
f"Found {len(files)} files in local configuration {local_configuration.get_path()}"
)
non_excluded_files = {
file
for file in files
if not any(
exclude_pattern.search(str(file)) is not None
for exclude_pattern in self.exclude_patterns
)
}
LOG.debug(
f"Found {len(non_excluded_files)} in local configuration {local_configuration.get_path()}"
)
return non_excluded_files
def get_already_migrated_files(
self,
buck_root: Path,
) -> Set[Path]:
already_migrated_files: Set[Path] = set()
nested_configurations = (
self.local_configuration.get_nested_configuration_paths()
)
for configuration_path in nested_configurations:
nested_local_configuration = Configuration(Path(configuration_path))
migrated_files = self.get_files_from_local_configuration(
nested_local_configuration,
buck_root,
)
already_migrated_files |= migrated_files
return already_migrated_files
| ConfigurationlessOptions |
python | doocs__leetcode | solution/0300-0399/0386.Lexicographical Numbers/Solution.py | {
"start": 0,
"end": 338
} | class ____:
def lexicalOrder(self, n: int) -> List[int]:
ans = []
v = 1
for _ in range(n):
ans.append(v)
if v * 10 <= n:
v *= 10
else:
while v % 10 == 9 or v + 1 > n:
v //= 10
v += 1
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | {
"start": 57335,
"end": 59650
} | class ____(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
super().__init__()
self.leaky_relu_slope = leaky_relu_slope
self.convs1 = nn.ModuleList(
[
nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=dilation[i],
padding=self.get_padding(kernel_size, dilation[i]),
)
for i in range(len(dilation))
]
)
self.convs2 = nn.ModuleList(
[
nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=1,
padding=self.get_padding(kernel_size, 1),
)
for _ in range(len(dilation))
]
)
def get_padding(self, kernel_size, dilation=1):
return (kernel_size * dilation - dilation) // 2
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
for layer in self.convs1:
weight_norm(layer)
for layer in self.convs2:
weight_norm(layer)
def remove_weight_norm(self):
for layer in self.convs1:
nn.utils.remove_weight_norm(layer)
for layer in self.convs2:
nn.utils.remove_weight_norm(layer)
def forward(self, hidden_states):
for conv1, conv2 in zip(self.convs1, self.convs2):
residual = hidden_states
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = conv1(hidden_states)
hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
hidden_states = conv2(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
@auto_docstring(
custom_intro="""
HiFi-GAN vocoder.
"""
)
# Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5HifiGan with SpeechT5->FastSpeech2Conformer
| HifiGanResidualBlock |
python | getsentry__sentry | tests/snuba/sessions/test_sessions.py | {
"start": 13966,
"end": 21634
} | class ____(TestCase, ReleaseHealthBaseTestCase):
backend = MetricsReleaseHealthBackend()
def test_get_oldest_health_data_for_releases(self) -> None:
self.bulk_store_sessions(self.create_sessions__v2_crashed())
data = self.backend.get_oldest_health_data_for_releases([(self.project.id, release_v1_0_0)])
assert data == {
(self.project.id, release_v1_0_0): format_timestamp(self.session_started // 3600 * 3600)
}
def test_get_release_adoption(self) -> None:
self.bulk_store_sessions(self.create_sessions__v2_crashed())
data = self.backend.get_release_adoption(
[
(self.project.id, release_v1_0_0),
(self.project.id, release_v2_0_0),
(self.project.id, "dummy-release"),
]
)
assert data == {
(self.project.id, release_v1_0_0): {
"sessions_24h": 2,
"users_24h": 1,
"adoption": 100.0,
"sessions_adoption": 66.66666666666666,
"project_sessions_24h": 3,
"project_users_24h": 1,
},
(self.project.id, release_v2_0_0): {
"sessions_24h": 1,
"users_24h": 1,
"adoption": 100.0,
"sessions_adoption": 33.33333333333333,
"project_sessions_24h": 3,
"project_users_24h": 1,
},
}
def test_get_release_adoption_lowered(self) -> None:
self.bulk_store_sessions(self.create_sessions__v2_crashed())
self.store_session(
self.build_session(
release=release_v2_0_0,
environment="prod",
status="crashed",
started=self.session_started,
received=self.received,
)
)
data = self.backend.get_release_adoption(
[
(self.project.id, release_v1_0_0),
(self.project.id, release_v2_0_0),
(self.project.id, "dummy-release"),
]
)
assert data == {
(self.project.id, release_v1_0_0): {
"sessions_24h": 2,
"users_24h": 1,
"adoption": 50.0,
"sessions_adoption": 50.0,
"project_sessions_24h": 4,
"project_users_24h": 2,
},
(self.project.id, release_v2_0_0): {
"sessions_24h": 2,
"users_24h": 2,
"adoption": 100.0,
"sessions_adoption": 50.0,
"project_sessions_24h": 4,
"project_users_24h": 2,
},
}
def test_fetching_release_sessions_time_bounds_for_different_release(self) -> None:
"""
Test that ensures only session bounds for releases are calculated according
to their respective release
"""
self.bulk_store_sessions(self.create_sessions__v2_crashed())
# Same release session
self.store_session(
self.build_session(
release=release_v1_0_0,
environment="prod",
status="exited",
started=self.session_started - 3600 * 2,
received=self.received - 3600 * 2,
)
)
# Different release session
self.store_session(
self.build_session(
release=release_v2_0_0,
environment="prod",
status="crashed",
started=self.session_started - 3600 * 2,
received=self.received - 3600 * 2,
)
)
expected_formatted_lower_bound = (
datetime.fromtimestamp(self.session_started - 3600 * 2)
.replace(minute=0)
.isoformat()[:19]
+ "Z"
)
expected_formatted_upper_bound = (
datetime.fromtimestamp(self.session_started).replace(minute=0).isoformat()[:19] + "Z"
)
# Test for self.session_release
data = self.backend.get_release_sessions_time_bounds(
project_id=self.project.id,
release=release_v1_0_0,
org_id=self.organization.id,
environments=["prod"],
)
assert data == {
"sessions_lower_bound": expected_formatted_lower_bound,
"sessions_upper_bound": expected_formatted_upper_bound,
}
# Test for self.session_crashed_release
data = self.backend.get_release_sessions_time_bounds(
project_id=self.project.id,
release=release_v2_0_0,
org_id=self.organization.id,
environments=["prod"],
)
assert data == {
"sessions_lower_bound": expected_formatted_lower_bound,
"sessions_upper_bound": expected_formatted_upper_bound,
}
def test_fetching_release_sessions_time_bounds_for_different_release_with_no_sessions(
self,
) -> None:
"""
Test that ensures if no sessions are available for a specific release then the bounds
should be returned as None
"""
self.bulk_store_sessions(self.create_sessions__v2_crashed())
data = self.backend.get_release_sessions_time_bounds(
project_id=self.project.id,
release="different_release",
org_id=self.organization.id,
environments=["prod"],
)
assert data == {
"sessions_lower_bound": None,
"sessions_upper_bound": None,
}
def test_basic_release_model_adoptions(self) -> None:
"""
Test that the basic (project,release) data is returned
"""
self.bulk_store_sessions(self.create_sessions__v2_crashed())
proj_id = self.project.id
data = self.backend.get_changed_project_release_model_adoptions([proj_id])
assert set(data) == {(proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0")}
def test_old_release_model_adoptions(self) -> None:
"""
Test that old entries (older than 72 h) are not returned
"""
self.bulk_store_sessions(self.create_sessions__v2_crashed())
_100h = 100 * 60 * 60 # 100 hours in seconds
proj_id = self.project.id
self.store_session(
self.build_session(
release="foo@3.0.0",
environment="prod",
status="crashed",
started=self.session_started - _100h,
received=self.received - 3600 * 2,
)
)
data = self.backend.get_changed_project_release_model_adoptions([proj_id])
assert set(data) == {(proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0")}
def test_multi_proj_release_model_adoptions(self) -> None:
"""Test that the api works with multiple projects"""
self.bulk_store_sessions(self.create_sessions__v2_crashed())
proj_id = self.project.id
new_proj_id = proj_id + 1
self.store_session(
self.build_session(
project_id=new_proj_id,
release="foo@3.0.0",
environment="prod",
status="crashed",
started=self.session_started,
received=self.received - 3600 * 2,
)
)
data = self.backend.get_changed_project_release_model_adoptions([proj_id, new_proj_id])
assert set(data) == {
(proj_id, "foo@1.0.0"),
(proj_id, "foo@2.0.0"),
(new_proj_id, "foo@3.0.0"),
}
| SnubaSessionsTest |
python | python__mypy | mypy/test/teststubtest.py | {
"start": 3324,
"end": 3555
} | class ____(type):
def __len__(self) -> int: pass
def __iter__(self: type[_T]) -> Iterator[_T]: pass
def __reversed__(self: type[_T]) -> Iterator[_T]: pass
def __getitem__(self: type[_T], name: str) -> _T: pass
| EnumMeta |
python | django__django | tests/m2m_through_regress/models.py | {
"start": 738,
"end": 1080
} | class ____(models.Model):
name = models.CharField(max_length=128)
# Membership object defined as a class
members = models.ManyToManyField(Person, through=Membership)
user_members = models.ManyToManyField(User, through="UserMembership")
def __str__(self):
return self.name
# Using to_field on the through model
| Group |
python | walkccc__LeetCode | solutions/3254. Find the Power of K-Size Subarrays I/3254.py | {
"start": 0,
"end": 295
} | class ____:
def resultsArray(self, nums: list[int], k: int) -> list[int]:
ans = []
start = 0
for i, num in enumerate(nums):
if i > 0 and num != nums[i - 1] + 1:
start = i
if i >= k - 1:
ans.append(num if i - start + 1 >= k else -1)
return ans
| Solution |
python | getsentry__sentry | tests/sentry/snuba/test_query_subscription_consumer.py | {
"start": 1218,
"end": 2510
} | class ____:
@cached_property
def dataset(self):
return Dataset.Metrics
@cached_property
def topic(self):
return Topic.METRICS_SUBSCRIPTIONS_RESULTS.value
@cached_property
def jsoncodec(self):
return get_codec(self.topic)
@cached_property
def valid_wrapper(self):
return {"version": 3, "payload": self.valid_payload}
@cached_property
def valid_payload(self):
return {
"subscription_id": "1234",
"result": {
"data": [{"hello": 50}],
"meta": [{"name": "count", "type": "UInt64"}],
},
"request": {
"some": "data",
"query": """MATCH (metrics_counters) SELECT sum(value) AS value BY
tags[3] WHERE org_id = 1 AND project_id IN tuple(1) AND metric_id = 16
AND tags[3] IN tuple(13, 4)""",
},
"entity": "metrics_counters",
"timestamp": "2020-01-01T01:23:45.1234",
}
def build_mock_message(self, data, topic=None):
message = mock.Mock()
message.value.return_value = json.dumps(data)
if topic:
message.topic.return_value = topic
return message
| BaseQuerySubscriptionTest |
python | django__django | tests/template_tests/test_base.py | {
"start": 1456,
"end": 1614
} | class ____(LexerTestMixin, SimpleTestCase):
lexer_class = DebugLexer
def make_expected(self):
return self.expected_token_tuples
| DebugLexerTests |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py | {
"start": 7193,
"end": 16582
} | class ____(BaseEmbedding):
"""
OpenAI class for embeddings.
Args:
mode (str): Mode for embedding.
Defaults to OpenAIEmbeddingMode.TEXT_SEARCH_MODE.
Options are:
- OpenAIEmbeddingMode.SIMILARITY_MODE
- OpenAIEmbeddingMode.TEXT_SEARCH_MODE
model (str): Model for embedding.
Defaults to OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002.
Options are:
- OpenAIEmbeddingModelType.DAVINCI
- OpenAIEmbeddingModelType.CURIE
- OpenAIEmbeddingModelType.BABBAGE
- OpenAIEmbeddingModelType.ADA
- OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The OpenAI API key.")
api_base: Optional[str] = Field(
default=DEFAULT_OPENAI_API_BASE, description="The base URL for OpenAI API."
)
api_version: Optional[str] = Field(
default=DEFAULT_OPENAI_API_VERSION, description="The version for OpenAI API."
)
max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
default_headers: Optional[Dict[str, str]] = Field(
default=None, description="The default headers for API requests."
)
reuse_client: bool = Field(
default=True,
description=(
"Reuse the OpenAI client between requests. When doing anything with large "
"volumes of async API calls, setting this to false can improve stability."
),
)
dimensions: Optional[int] = Field(
default=None,
description=(
"The number of dimensions on the output embedding vectors. "
"Works only with v3 embedding models."
),
)
_query_engine: str = PrivateAttr()
_text_engine: str = PrivateAttr()
_client: Optional[OpenAI] = PrivateAttr()
_aclient: Optional[AsyncOpenAI] = PrivateAttr()
_http_client: Optional[httpx.Client] = PrivateAttr()
_async_http_client: Optional[httpx.AsyncClient] = PrivateAttr()
def __init__(
self,
mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
embed_batch_size: int = 100,
dimensions: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
if dimensions is not None:
additional_kwargs["dimensions"] = dimensions
api_key, api_base, api_version = self._resolve_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
query_engine = get_engine(mode, model, _QUERY_MODE_MODEL_DICT)
text_engine = get_engine(mode, model, _TEXT_MODE_MODEL_DICT)
if "model_name" in kwargs:
model_name = kwargs.pop("model_name")
query_engine = text_engine = model_name
else:
model_name = model
super().__init__(
embed_batch_size=embed_batch_size,
dimensions=dimensions,
callback_manager=callback_manager,
model_name=model_name,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
num_workers=num_workers,
**kwargs,
)
self._query_engine = query_engine
self._text_engine = text_engine
self._client = None
self._aclient = None
self._http_client = http_client
self._async_http_client = async_http_client
def _resolve_credentials(
self,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
return resolve_openai_credentials(api_key, api_base, api_version)
def _get_client(self) -> OpenAI:
if not self.reuse_client:
return OpenAI(**self._get_credential_kwargs())
if self._client is None:
self._client = OpenAI(**self._get_credential_kwargs())
return self._client
def _get_aclient(self) -> AsyncOpenAI:
if not self.reuse_client:
return AsyncOpenAI(**self._get_credential_kwargs(is_async=True))
if self._aclient is None:
self._aclient = AsyncOpenAI(**self._get_credential_kwargs(is_async=True))
return self._aclient
def _create_retry_decorator(self):
"""Create a retry decorator using the instance's max_retries."""
return create_retry_decorator(
max_retries=self.max_retries,
random_exponential=True,
stop_after_delay_seconds=60,
min_seconds=1,
max_seconds=20,
)
@classmethod
def class_name(cls) -> str:
return "OpenAIEmbedding"
def _get_credential_kwargs(self, is_async: bool = False) -> Dict[str, Any]:
return {
"api_key": self.api_key,
"base_url": self.api_base,
"max_retries": self.max_retries,
"timeout": self.timeout,
"default_headers": self.default_headers,
"http_client": self._async_http_client if is_async else self._http_client,
}
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
client = self._get_client()
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _retryable_get_embedding():
return get_embedding(
client,
query,
engine=self._query_engine,
**self.additional_kwargs,
)
return _retryable_get_embedding()
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
aclient = self._get_aclient()
retry_decorator = self._create_retry_decorator()
@retry_decorator
async def _retryable_aget_embedding():
return await aget_embedding(
aclient,
query,
engine=self._query_engine,
**self.additional_kwargs,
)
return await _retryable_aget_embedding()
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
client = self._get_client()
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _retryable_get_embedding():
return get_embedding(
client,
text,
engine=self._text_engine,
**self.additional_kwargs,
)
return _retryable_get_embedding()
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
aclient = self._get_aclient()
retry_decorator = self._create_retry_decorator()
@retry_decorator
async def _retryable_aget_embedding():
return await aget_embedding(
aclient,
text,
engine=self._text_engine,
**self.additional_kwargs,
)
return await _retryable_aget_embedding()
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""
Get text embeddings.
By default, this is a wrapper around _get_text_embedding.
Can be overridden for batch queries.
"""
client = self._get_client()
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _retryable_get_embeddings():
return get_embeddings(
client,
texts,
engine=self._text_engine,
**self.additional_kwargs,
)
return _retryable_get_embeddings()
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
aclient = self._get_aclient()
retry_decorator = self._create_retry_decorator()
@retry_decorator
async def _retryable_aget_embeddings():
return await aget_embeddings(
aclient,
texts,
engine=self._text_engine,
**self.additional_kwargs,
)
return await _retryable_aget_embeddings()
| OpenAIEmbedding |
python | django__django | tests/admin_inlines/models.py | {
"start": 1717,
"end": 1908
} | class ____(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
| EditablePKBook |
python | matplotlib__matplotlib | lib/matplotlib/animation.py | {
"start": 55854,
"end": 58798
} | class ____(Animation):
"""
`Animation` subclass for time-based animation.
A new frame is drawn every *interval* milliseconds.
.. note::
You must store the created Animation in a variable that lives as long
as the animation should run. Otherwise, the Animation object will be
garbage-collected and the animation stops.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure object used to get needed events, such as draw or resize.
interval : int, default: 200
Delay between frames in milliseconds.
repeat_delay : int, default: 0
The delay in milliseconds between consecutive animation runs, if
*repeat* is True.
repeat : bool, default: True
Whether the animation repeats when the sequence of frames is completed.
blit : bool, default: False
Whether blitting is used to optimize drawing.
"""
def __init__(self, fig, interval=200, repeat_delay=0, repeat=True,
event_source=None, *args, **kwargs):
self._interval = interval
# Undocumented support for repeat_delay = None as backcompat.
self._repeat_delay = repeat_delay if repeat_delay is not None else 0
self._repeat = repeat
# If we're not given an event source, create a new timer. This permits
# sharing timers between animation objects for syncing animations.
if event_source is None:
event_source = fig.canvas.new_timer(interval=self._interval)
super().__init__(fig, event_source=event_source, *args, **kwargs)
def _step(self, *args):
"""Handler for getting events."""
# Extends the _step() method for the Animation class. If
# Animation._step signals that it reached the end and we want to
# repeat, we refresh the frame sequence and return True. If
# _repeat_delay is set, change the event_source's interval to our loop
# delay and set the callback to one which will then set the interval
# back.
still_going = super()._step(*args)
if not still_going:
if self._repeat:
# Restart the draw loop
self._init_draw()
self.frame_seq = self.new_frame_seq()
self.event_source.interval = self._repeat_delay
return True
else:
# We are done with the animation. Call pause to remove
# animated flags from artists that were using blitting
self.pause()
if self._blit:
# Remove the resize callback if we were blitting
self._fig.canvas.mpl_disconnect(self._resize_id)
self._fig.canvas.mpl_disconnect(self._close_id)
self.event_source = None
return False
self.event_source.interval = self._interval
return True
| TimedAnimation |
python | apache__avro | lang/py/avro/test/test_name.py | {
"start": 935,
"end": 9783
} | class ____(unittest.TestCase):
"""Test name parsing"""
def test_name_is_none(self):
"""When a name is None its namespace is None."""
self.assertIsNone(avro.schema.Name(None, None, None).fullname)
self.assertIsNone(avro.schema.Name(None, None, None).space)
def test_name_not_empty_string(self):
"""A name cannot be the empty string."""
self.assertRaises(avro.errors.SchemaParseException, avro.schema.Name, "", None, None)
def test_name_space_specified(self):
"""Space combines with a name to become the fullname."""
# name and namespace specified
fullname = avro.schema.Name("a", "o.a.h", None).fullname
self.assertEqual(fullname, "o.a.h.a")
def test_name_inlined_space(self):
"""Space inlined with name is correctly splitted out."""
name = avro.schema.Name("o.a", None)
self.assertEqual(name.fullname, "o.a")
self.assertEqual(name.name, "a")
self.assertEqual(name.space, "o")
name = avro.schema.Name("o.a.h.a", None)
self.assertEqual(name.fullname, "o.a.h.a")
self.assertEqual(name.name, "a")
self.assertEqual(name.space, "o.a.h")
def test_fullname_space_specified(self):
"""When name contains dots, namespace should be ignored."""
fullname = avro.schema.Name("a.b.c.d", "o.a.h", None).fullname
self.assertEqual(fullname, "a.b.c.d")
def test_name_default_specified(self):
"""Default space becomes the namespace when the namespace is None."""
fullname = avro.schema.Name("a", None, "b.c.d").fullname
self.assertEqual(fullname, "b.c.d.a")
def test_fullname_default_specified(self):
"""When a name contains dots, default space should be ignored."""
fullname = avro.schema.Name("a.b.c.d", None, "o.a.h").fullname
self.assertEqual(fullname, "a.b.c.d")
def test_fullname_space_default_specified(self):
"""When a name contains dots, namespace and default space should be ignored."""
fullname = avro.schema.Name("a.b.c.d", "o.a.a", "o.a.h").fullname
self.assertEqual(fullname, "a.b.c.d")
def test_name_space_default_specified(self):
"""When name and space are specified, default space should be ignored."""
fullname = avro.schema.Name("a", "o.a.a", "o.a.h").fullname
self.assertEqual(fullname, "o.a.a.a")
def test_equal_names(self):
"""Equality of names is defined on the fullname and is case-sensitive."""
self.assertEqual(
avro.schema.Name("a.b.c.d", None, None),
avro.schema.Name("d", "a.b.c", None),
)
self.assertNotEqual(avro.schema.Name("C.d", None, None), avro.schema.Name("c.d", None, None))
def test_invalid_name(self):
"""The name portion of a fullname, record field names, and enum symbols must:
start with [A-Za-z_] and subsequently contain only [A-Za-z0-9_]"""
self.assertRaises(
avro.errors.InvalidName,
avro.schema.Name,
"an especially spacey cowboy",
None,
None,
)
self.assertRaises(
avro.errors.InvalidName,
avro.schema.Name,
"99 problems but a name aint one",
None,
None,
)
# A name cannot start with dot.
self.assertRaises(avro.errors.InvalidName, avro.schema.Name, ".a", None, None)
self.assertRaises(avro.errors.InvalidName, avro.schema.Name, "o..a", None, None)
self.assertRaises(avro.errors.InvalidName, avro.schema.Name, "a.", None, None)
def test_null_namespace(self):
"""The empty string may be used as a namespace to indicate the null namespace."""
name = avro.schema.Name("name", "", None)
self.assertEqual(name.fullname, "name")
self.assertIsNone(name.space)
def test_disable_name_validation(self):
"""Test name validation disable."""
# Test name class
avro.schema.Name(name_attr="an especially spacey cowboy", space_attr=None, default_space=None, validate_name=False)
avro.schema.Name(name_attr="cowboy", space_attr="an especially spacey ", default_space=None, validate_name=False)
avro.schema.Name(name_attr="cowboy", space_attr=None, default_space="an especially spacey ", validate_name=False)
avro.schema.Name(name_attr="name-space.with-dash.cowboy", space_attr=None, default_space=None, validate_name=False)
avro.schema.Name(name_attr="cowboy", space_attr="name-space.with-dash", default_space=None, validate_name=False)
# Test record schema
cowboy_record_1 = avro.schema.RecordSchema(
name="an especially spacey cowboy", namespace=None, fields=[{"name": "value", "type": "long"}], validate_names=False
)
cowboy_record_2 = avro.schema.RecordSchema(
name="cowboy", namespace="an especially spacey ", fields=[{"name": "value", "type": "int"}], validate_names=False
)
# Test Names container class
names = avro.schema.Names(default_namespace=None, validate_names=False)
names.add_name(name_attr=cowboy_record_1.name, space_attr=cowboy_record_1.namespace, new_schema=cowboy_record_1)
names.add_name(name_attr=cowboy_record_2.name, space_attr=cowboy_record_2.namespace, new_schema=cowboy_record_2)
# Test fixed schema
avro.schema.FixedSchema(name="an especially spacey cowboy", namespace=None, size=16, validate_names=False)
avro.schema.FixedSchema(name="cowboy", namespace="an especially spacey", size=16, validate_names=False)
# Test fixed decimal schema
avro.schema.FixedDecimalSchema(name="an especially spacey cowboy", namespace=None, size=16, precision=2, validate_names=False)
avro.schema.FixedDecimalSchema(name="cowboy", namespace="an especially spacey", size=16, precision=2, validate_names=False)
# Test enum schema
avro.schema.EnumSchema(name="an especially spacey cowboy", namespace=None, symbols=["A", "B"], validate_names=False)
avro.schema.EnumSchema(name="cowboy", namespace="an especially spacey", symbols=["A", "B"], validate_names=False)
EXAMPLES = [
# Enum
{"type": "enum", "name": "invalid-name", "symbols": ["A", "B"]},
{"type": "enum", "name": "invalid-ns.ab", "symbols": ["A", "B"]},
{"type": "enum", "name": "ab", "namespace": "invalid-ns", "symbols": ["A", "B"]},
# Record
{"type": "record", "name": "invalid-name", "fields": [{"name": "distance", "type": "long"}]},
{"type": "record", "name": "invalid-ns.journey", "fields": [{"name": "distance", "type": "long"}]},
{"type": "record", "name": "journey", "namespace": "invalid-ns", "fields": [{"name": "distance", "type": "long"}]},
# FixedSchema
{"type": "fixed", "name": "invalid-name", "size": 10, "precision": 2},
{"type": "fixed", "name": "invalid-ns.irrational", "size": 10, "precision": 2},
{"type": "fixed", "name": "irrational", "namespace": "invalid-ns", "size": 10, "precision": 2},
# FixedDecimalSchema / logical type
{"type": "fixed", "logicalType": "decimal", "name": "invalid-name", "size": 10, "precision": 2},
{"type": "fixed", "logicalType": "decimal", "name": "invalid-ns.irrational", "size": 10, "precision": 2},
{"type": "fixed", "logicalType": "decimal", "name": "irrational", "namespace": "invalid-ns", "size": 10, "precision": 2},
# In fields
{
"type": "record",
"name": "world",
"fields": [
{
"type": {"type": "record", "name": "invalid-name", "fields": [{"name": "distance", "type": "long"}]},
"name": "cup",
},
],
},
# In union
[{"type": "string"}, {"type": "record", "name": "invalid-name", "fields": [{"name": "distance", "type": "long"}]}],
# In array
{
"type": "record",
"name": "world",
"fields": [
{
"name": "journeys",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "invalid-name",
"fields": [{"name": "distance", "type": "long"}],
},
},
},
],
},
# In map
{
"type": "record",
"name": "world",
"fields": [
{
"name": "journeys",
"type": {
"type": "map",
"values": {
"type": "record",
"name": "invalid-name",
"fields": [{"name": "distance", "type": "long"}],
},
},
},
],
},
]
| TestName |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 543334,
"end": 544418
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"edges",
"filtered_count",
"nodes",
"page_count",
"page_info",
"total_count",
"updated_at",
)
edges = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestTimelineItemsEdge"), graphql_name="edges"
)
filtered_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="filteredCount"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestTimelineItems"), graphql_name="nodes"
)
page_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="pageCount")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
| PullRequestTimelineItemsConnection |
python | pytorch__pytorch | test/functorch/test_ac_knapsack.py | {
"start": 7594,
"end": 13801
} | class ____(TestCase):
"""
Test class for KnapsackEvaluator.
The test class sets up a small graph example and tests the methods validating the knapsack evaluation logic.
"""
def setUp(self) -> None:
super().setUp()
self.graph_nodes_in_order = [
"node1",
"node2",
"node3",
"node4",
"node5",
"output",
]
self.graph_edges = [
("node1", "node2"),
("node2", "node3"),
("node3", "node4"),
("node4", "node5"),
("node5", "output"),
("node1", "output"),
]
self.all_recomputable_banned_nodes = ["node1", "node2", "node5"]
self.recorded_knapsack_input_memories = [0.1, 0.2, 0.2]
self.recorded_knapsack_input_runtimes = [100.0, 50.0, 51.0]
self.graph_info_provider = GraphInfoProvider(
graph_nodes_in_order=self.graph_nodes_in_order,
graph_edges=self.graph_edges,
all_recomputable_banned_nodes=self.all_recomputable_banned_nodes,
recorded_knapsack_input_memories=self.recorded_knapsack_input_memories,
recorded_knapsack_input_runtimes=self.recorded_knapsack_input_runtimes,
)
self.knapsack_evaluator = KnapsackEvaluator(
graph_info_provider=self.graph_info_provider
)
self.knapsack_algo = lambda memory_values, runtime_values, memory_budget: {
0.1: (101.0, [0], [1, 2]),
0.2: (101.0, [0], [1, 2]),
0.3: (50.0, [0, 2], [1]),
0.4: (50.0, [0, 2], [1]),
0.5: (0.0, [0, 1, 2], []),
}.get(memory_budget, (0.0, [0, 1, 2], []))
def test_evaluate_knapsack_output_not_accounting_for_backward_pass(self):
saved_nodes_idxs = [0]
recomputable_node_idxs = [1, 2]
result = self.knapsack_evaluator.evaluate_knapsack_output(
saved_nodes_idxs=saved_nodes_idxs,
recomputable_node_idxs=recomputable_node_idxs,
)
self.assertEqual(result["peak_memory"], 0.1)
self.assertEqual(result["recomputation_runtime"], 101.0)
def test_evaluate_knapsack_output_accounting_for_backward_pass(self):
saved_nodes_idxs = [0]
recomputable_node_idxs = [1, 2]
result = self.knapsack_evaluator.evaluate_knapsack_output(
saved_nodes_idxs=saved_nodes_idxs,
recomputable_node_idxs=recomputable_node_idxs,
account_for_backward_pass=True,
)
self.assertEqual(result["peak_memory"], 0.5)
self.assertEqual(result["recomputation_runtime"], 101.0)
def test_evaluate_knapsack_output_with_wrong_sized_values(self):
saved_nodes_idxs = [0]
recomputable_node_idxs = [1]
with self.assertRaises(AssertionError):
self.knapsack_evaluator.evaluate_knapsack_output(
saved_nodes_idxs=saved_nodes_idxs,
recomputable_node_idxs=recomputable_node_idxs,
)
def test_evaluate_distribution_of_results_for_knapsack_algo(self):
memory_budget_values = [0.1, 0.2, 0.3]
results = (
self.knapsack_evaluator.evaluate_distribution_of_results_for_knapsack_algo(
knapsack_algo=self.knapsack_algo,
memory_budget_values=memory_budget_values,
)
)
self.assertEqual(len(results), len(memory_budget_values))
self.assertEqual(results[0]["memory_budget"], 0.1)
self.assertEqual(results[0]["peak_memory"], 0.1)
self.assertEqual(results[0]["recomputation_runtime"], 101)
self.assertEqual(results[1]["non_ac_peak_memory"], 0.5)
self.assertEqual(results[1]["theoretical_max_runtime"], 201)
self.assertEqual(results[2]["percentage_of_theoretical_peak_memory"], 0.3 / 0.5)
self.assertEqual(
results[2]["percentage_of_theoretical_peak_runtime"], 50.0 / 201
)
def test_get_knee_point_memory_budget(self):
"""
Checks if the method correctly estimates the knee point in the memory budget
where the trade-off between memory usage and recomputation runtime is optimal.
If memory budget and runtime are considered as equal cost, then the knee point
is where the distance from 0 is smallest.
"""
max_mem_budget_to_expected_knee_point = {
0.1: 0.1,
0.2: 0.1,
0.3: 0.3,
0.4: 0.4, # 0.3 and 0.4 provide the same algo output so this is arbitrary
0.5: 0.4,
}
for (
max_mem_budget,
expected_knee_point,
) in max_mem_budget_to_expected_knee_point.items():
knee_point_memory_budget = (
self.knapsack_evaluator.get_knee_point_memory_budget(
knapsack_algo=self.knapsack_algo,
max_mem_budget=max_mem_budget,
min_mem_budget=0.1,
iterations=5,
)
)
self.assertEqual(knee_point_memory_budget, expected_knee_point)
def test_get_backward_memory_from_topologically_sorted_graph(self):
result = self.knapsack_evaluator._get_backward_memory_from_topologically_sorted_graph(
node_graph=self.graph_info_provider.recomputable_node_only_graph_with_larger_graph_context,
node_memories=self.graph_info_provider.all_node_memories,
saved_nodes_set={"node1"},
peak_memory_after_forward_pass=0.1,
)
expected_result = [
(0.1, "Initial Peak/Current Memory"),
(0.3, "Recomputing Node: node5"),
(0.5, "Recomputing Predecessor of node5: node2"),
(0.3, "Dropping Node: node5"),
(0.1, "Dropping Node(already saved): node2"),
(0.0, "Dropping Node(already saved): node1"),
]
print(result, expected_result)
for result_item, expected_result_item in zip(result, expected_result):
self.assertAlmostEqual(result_item[0], expected_result_item[0])
self.assertEqual(result_item[1], expected_result_item[1])
| TestKnapsackEvaluator |
python | ipython__ipython | IPython/testing/plugin/pytest_ipdoctest.py | {
"start": 8137,
"end": 16667
} | class ____(pytest.Item):
_user_ns_orig: Dict[str, Any]
def __init__(
self,
name: str,
parent: "Union[IPDoctestTextfile, IPDoctestModule]",
runner: Optional["IPDocTestRunner"] = None,
dtest: Optional["doctest.DocTest"] = None,
) -> None:
super().__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request: Optional[FixtureRequest] = None
self._user_ns_orig = {}
@classmethod
def from_parent( # type: ignore
cls,
parent: "Union[IPDoctestTextfile, IPDoctestModule]",
*,
name: str,
runner: "IPDocTestRunner",
dtest: "doctest.DocTest",
):
# incompatible signature due to imposed limits on subclass
"""The public named constructor."""
return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
def setup(self) -> None:
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"ipdoctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
from .ipdoctest import IPExample
if isinstance(self.dtest.examples[0], IPExample):
# for IPython examples *only*, we swap the globals with the ipython
# namespace, after updating it with the globals (which doctest
# fills with the necessary info from the module being tested).
self._user_ns_orig = {}
self._user_ns_orig.update(_ip.user_ns)
_ip.user_ns.update(self.dtest.globs)
# We must remove the _ key in the namespace, so that Python's
# doctest code sets it naturally
_ip.user_ns.pop("_", None)
_ip.user_ns["__builtins__"] = builtins
self.dtest.globs = _ip.user_ns
def teardown(self) -> None:
from .ipdoctest import IPExample
# Undo the test.globs reassignment we made
if isinstance(self.dtest.examples[0], IPExample):
self.dtest.globs = {}
_ip.user_ns.clear()
_ip.user_ns.update(self._user_ns_orig)
del self._user_ns_orig
self.dtest.globs.clear()
def runtest(self) -> None:
assert self.dtest is not None
assert self.runner is not None
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures: List[doctest.DocTestFailure] = []
# exec(compile(..., "single", ...), ...) puts result in builtins._
had_underscore_value = hasattr(builtins, "_")
underscore_original_value = getattr(builtins, "_", None)
# Save our current directory and switch out to the one where the
# test was originally created, in case another doctest did a
# directory change. We'll restore this in the finally clause.
curdir = os.getcwd()
os.chdir(self.fspath.dirname)
try:
# Type ignored because we change the type of `out` from what
# ipdoctest expects.
self.runner.run(self.dtest, out=failures, clear_globs=False) # type: ignore[arg-type]
finally:
os.chdir(curdir)
if had_underscore_value:
setattr(builtins, "_", underscore_original_value)
elif hasattr(builtins, "_"):
delattr(builtins, "_")
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self) -> None:
"""Disable output capturing. Otherwise, stdout is lost to ipdoctest (pytest#985)."""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
# TODO: Type ignored -- breaks Liskov Substitution.
def repr_failure( # type: ignore[override]
self,
excinfo: ExceptionInfo[BaseException],
) -> Union[str, TerminalRepr]:
import doctest
failures: Optional[
Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]
] = None
if isinstance(
excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
):
failures = [excinfo.value]
elif isinstance(excinfo.value, MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is None:
return super().repr_failure(excinfo)
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
# TODO: ReprFileLocation doesn't expect a None lineno.
reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type]
checker = _get_checker()
report_choice = _get_report_choice(self.config.getoption("ipdoctestreport"))
if lineno is not None:
assert failure.test.docstring is not None
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
assert test.lineno is not None
lines = [
"%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
else:
lines = [
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
]
indent = ">>>"
for line in example.source.splitlines():
lines.append(f"??? {indent} {line}")
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
example, failure.got, report_choice
).split("\n")
else:
inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
lines += [
x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
]
reprlocation_lines.append((reprlocation, lines))
return ReprFailDoctest(reprlocation_lines)
def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
assert self.dtest is not None
return self.path, self.dtest.lineno, "[ipdoctest] %s" % self.name
if pytest_version[0] < 7:
@property
def path(self) -> Path:
return Path(self.fspath)
def _get_flag_lookup() -> Dict[str, int]:
import doctest
return dict(
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
NUMBER=_get_number_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("ipdoctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("ipdoctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
# the first failure.
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
| IPDoctestItem |
python | coleifer__peewee | tests/schema.py | {
"start": 819,
"end": 963
} | class ____(TestModel):
data = IntegerField(null=True, constraints=[Check('data < 5')])
value = TextField(collation='NOCASE')
| TMConstraints |
python | google__pytype | pytype/tests/test_pyi2.py | {
"start": 6975,
"end": 7761
} | class ____(test_base.BaseTest):
"""Tests for __all__."""
def test_star_import(self):
with self.DepTree([
(
"foo.pyi",
"""
import datetime
__all__ = ['f', 'g']
def f(x): ...
def h(x): ...
""",
),
(
"bar.pyi",
"""
from foo import *
""",
),
]):
self.CheckWithErrors("""
import bar
a = bar.datetime # module-attr
b = bar.f(1)
c = bar.h(1) # module-attr
""")
def test_http_client(self):
"""Check that we can get unexported symbols from http.client."""
self.Check("""
from http import client
from six.moves import http_client
status = http_client.FOUND or client.FOUND
""")
| PYITestAll |
python | django__django | tests/many_to_many/models.py | {
"start": 1650,
"end": 1838
} | class ____(models.Model):
class Meta:
abstract = True
publications = models.ManyToManyField(
Publication, name="publications", related_name="+"
)
| AbstractArticle |
python | django__django | tests/gis_tests/geoapp/models.py | {
"start": 1391,
"end": 1465
} | class ____(models.Model):
val = models.BooleanField(default=False)
| Truth |
python | Netflix__metaflow | metaflow/flowspec.py | {
"start": 4978,
"end": 9218
} | class ____(type):
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
if name == "FlowSpec":
return
cls._init_attrs()
def _init_attrs(cls):
from .decorators import (
DuplicateFlowDecoratorException,
) # Prevent circular import
# We store some state in the flow class itself. This is primarily used to
# attach global state to a flow. It is *not* an actual global because of
# Runner/NBRunner. This is also created here in the meta class to avoid it being
# shared between different children classes.
# Keys are FlowStateItems enum values
cls._flow_state = _FlowState(
{
FlowStateItems.FLOW_MUTATORS: [],
FlowStateItems.FLOW_DECORATORS: {},
FlowStateItems.CONFIGS: {},
FlowStateItems.CACHED_PARAMETERS: None,
FlowStateItems.SET_CONFIG_PARAMETERS: [],
}
)
# Keep track if configs have been processed -- this is particularly applicable
# for the Runner/Deployer where calling multiple APIs on the same flow could
# cause the configs to be processed multiple times. For a given flow, once
# the configs have been processed, we do not process them again.
cls._configs_processed = False
# We inherit stuff from our parent classes as well -- we need to be careful
# in terms of the order; we will follow the MRO with the following rules:
# - decorators will cause an error if they do not
# support multiple and we see multiple instances of the same
# - config decorators will be joined
# - configs will be added later directly by the class; base class configs will
# be taken into account as they would be inherited.
# We only need to do this for the base classes since the current class will
# get updated as decorators are parsed.
# We also need to be sure to not duplicate things. Consider something like
# class A(FlowSpec):
# pass
#
# class B(A):
# pass
#
# class C(B):
# pass
#
# C inherits from both B and A but we need to duplicate things from A only
# ONCE. To do this, we only propagate the self data from each class.
for base in cls.__mro__:
if base != cls and base != FlowSpec and issubclass(base, FlowSpec):
# Take care of decorators
base_flow_decorators = base._flow_state.self_data[
FlowStateItems.FLOW_DECORATORS
]
inherited_cls_flow_decorators = (
cls._flow_state.inherited_data.setdefault(
FlowStateItems.FLOW_DECORATORS, {}
)
)
for deco_name, deco in base_flow_decorators.items():
if not deco:
continue
deco_allow_multiple = deco[0].allow_multiple
if (
deco_name in inherited_cls_flow_decorators
and not deco_allow_multiple
):
raise DuplicateFlowDecoratorException(deco_name)
inherited_cls_flow_decorators.setdefault(deco_name, []).extend(deco)
# Take care of flow mutators -- configs are just objects in the class
# so they are naturally inherited. We do not need to do anything special
# for them.
base_mutators = base._flow_state.self_data[FlowStateItems.FLOW_MUTATORS]
if base_mutators:
cls._flow_state.inherited_data.setdefault(
FlowStateItems.FLOW_MUTATORS, []
).extend(base_mutators)
cls._init_graph()
def _init_graph(cls):
# Graph and steps are specific to the class -- store here so we can access
# in class method _process_config_decorators
cls._graph = FlowGraph(cls)
cls._steps = [getattr(cls, node.name) for node in cls._graph]
| FlowSpecMeta |
python | astropy__astropy | astropy/coordinates/representation/spherical.py | {
"start": 43506,
"end": 47175
} | class ____(BaseDifferential):
"""Differentials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `~astropy.coordinates.CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
`~astropy.coordinates.RadialDifferential`), they will combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (
isinstance(other, BaseSphericalCosLatDifferential)
and not isinstance(self, type(other))
) or isinstance(other, RadialDifferential):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {
c: op(getattr(first, c, 0.0), getattr(second, c, 0.0))
for c in all_components
}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
| BaseSphericalCosLatDifferential |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 344268,
"end": 345096
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateEnterpriseOrganizationProjectsSetting
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set the organization projects
setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue), graphql_name="settingValue")
"""The value for the organization projects setting on the enterprise."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseOrganizationProjectsSettingInput |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 660859,
"end": 661346
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of FollowOrganization"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "organization")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
organization = sgqlc.types.Field("Organization", graphql_name="organization")
"""The organization that was followed."""
| FollowOrganizationPayload |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 54391,
"end": 55330
} | class ____(BuiltinFunctionT):
_kwargs = {
"value": KwargSettings(UINT256_T, zero_value),
"salt": KwargSettings(BYTES32_T, empty_value),
"revert_on_failure": KwargSettings(BoolT(), True, require_literal=True),
}
_return_type = AddressT()
@process_inputs
def build_IR(self, expr, args, kwargs, context):
# errmsg something like f"Cannot use {self._id} in pure fn"
context.check_is_not_constant(f"use {self._id}", expr)
should_use_create2 = "salt" in [kwarg.arg for kwarg in expr.keywords]
if not should_use_create2:
kwargs["salt"] = CREATE2_SENTINEL
ir_builder = self._build_create_IR(expr, args, context, **kwargs)
add_gas_estimate = self._add_gas_estimate(args, should_use_create2)
return IRnode.from_list(
ir_builder, typ=AddressT(), annotation=self._id, add_gas_estimate=add_gas_estimate
)
| _CreateBase |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/query_expression.py | {
"start": 8400,
"end": 9259
} | class ____(QueryExpressionVisitor[set[str]]):
"""
Visitor that recursively computes all the metrics MRI of the `QueryExpression`.
"""
def _visit_formula(self, formula: Formula) -> set[str]:
metrics: set[str] = set()
for parameter in formula.parameters:
metrics = metrics.union(self.visit(parameter))
return metrics
def _visit_timeseries(self, timeseries: Timeseries) -> set[str]:
if timeseries.metric.mri is None:
raise InvalidMetricsQueryError("Can't determine queried metrics without a MRI")
return {timeseries.metric.mri}
def _visit_int(self, int_number: float):
return set()
def _visit_float(self, float_number: float) -> set[str]:
return set()
def _visit_string(self, string: str) -> set[str]:
return set()
| QueriedMetricsVisitor |
python | Textualize__textual | src/textual/_duration.py | {
"start": 74,
"end": 186
} | class ____(Exception):
"""
Exception indicating a general issue with a CSS duration.
"""
| DurationError |
python | TheAlgorithms__Python | cellular_automata/wa_tor.py | {
"start": 1191,
"end": 3000
} | class ____:
"""
Represents an entity (either prey or predator).
>>> e = Entity(True, coords=(0, 0))
>>> e.prey
True
>>> e.coords
(0, 0)
>>> e.alive
True
"""
def __init__(self, prey: bool, coords: tuple[int, int]) -> None:
self.prey = prey
# The (row, col) pos of the entity
self.coords = coords
self.remaining_reproduction_time = (
PREY_REPRODUCTION_TIME if prey else PREDATOR_REPRODUCTION_TIME
)
self.energy_value = None if prey is True else PREDATOR_INITIAL_ENERGY_VALUE
self.alive = True
def reset_reproduction_time(self) -> None:
"""
>>> e = Entity(True, coords=(0, 0))
>>> e.reset_reproduction_time()
>>> e.remaining_reproduction_time == PREY_REPRODUCTION_TIME
True
>>> e = Entity(False, coords=(0, 0))
>>> e.reset_reproduction_time()
>>> e.remaining_reproduction_time == PREDATOR_REPRODUCTION_TIME
True
"""
self.remaining_reproduction_time = (
PREY_REPRODUCTION_TIME if self.prey is True else PREDATOR_REPRODUCTION_TIME
)
def __repr__(self) -> str:
"""
>>> Entity(prey=True, coords=(1, 1))
Entity(prey=True, coords=(1, 1), remaining_reproduction_time=5)
>>> Entity(prey=False, coords=(2, 1)) # doctest: +NORMALIZE_WHITESPACE
Entity(prey=False, coords=(2, 1),
remaining_reproduction_time=20, energy_value=15)
"""
repr_ = (
f"Entity(prey={self.prey}, coords={self.coords}, "
f"remaining_reproduction_time={self.remaining_reproduction_time}"
)
if self.energy_value is not None:
repr_ += f", energy_value={self.energy_value}"
return f"{repr_})"
| Entity |
python | pytorch__pytorch | test/torch_np/numpy_tests/fft/test_pocketfft.py | {
"start": 12584,
"end": 14054
} | class ____(TestCase):
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [
threading.Thread(target=worker, args=(args, q)) for i in range(self.threads)
]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for _ in range(self.threads):
# under torch.dynamo `assert_array_equal` fails with relative errors of
# about 1.5e-14. Hence replace it with `assert_allclose(..., atol=2e-14)`
assert_allclose(
q.get(timeout=5),
expected,
atol=2e-14,
# msg="Function returned wrong value in multithreaded context",
)
def test_fft(self):
a = np.ones(self.input_shape) * 1 + 0j
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
a = np.ones(self.input_shape) * 1 + 0j
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
a = np.ones(self.input_shape) * 1 + 0j
self._test_mtsame(np.fft.irfft, a)
if __name__ == "__main__":
run_tests()
| TestFFTThreadSafe |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query.py | {
"start": 1372,
"end": 1483
} | class ____:
foo_attribute = ...
def __init__(self):
self.foo_instance = None
| AttributeTestClass5 |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_pickle.py | {
"start": 168,
"end": 692
} | class ____:
@pytest.mark.parametrize("freq", ["D", "M", "Y"])
def test_pickle_round_trip(self, freq):
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
def test_pickle_freq(self):
# GH#2891
prng = period_range("1/1/2011", "1/1/2012", freq="M")
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == "M"
| TestPickle |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 3710,
"end": 3800
} | class ____(Variadic_TA2[T_co]): ...
# This should generate an error.
| VariadicChildCo_WithTA2 |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-couchbase/llama_index/vector_stores/couchbase/base.py | {
"start": 1088,
"end": 7334
} | class ____(str, Enum):
"""Enum for similarity metrics supported by Couchbase GSI."""
COSINE = "COSINE"
DOT = "DOT"
L2 = "L2"
EUCLIDEAN = "EUCLIDEAN"
L2_SQUARED = "L2_SQUARED"
EUCLIDEAN_SQUARED = "EUCLIDEAN_SQUARED"
def _transform_couchbase_filter_condition(condition: str) -> str:
"""
Convert standard metadata filter condition to Couchbase specific condition.
Args:
condition: standard metadata filter condition
Returns:
Couchbase specific condition
"""
if condition == "and":
return "conjuncts"
elif condition == "or":
return "disjuncts"
else:
raise ValueError(f"Filter condition {condition} not supported")
def _transform_couchbase_filter_operator(
operator: str, field: str, value: Any
) -> Dict[str, Any]:
"""
Convert standard metadata filter operator to Couchbase specific filter operation.
Args:
operator: standard metadata filter operator
field: metadata field
value: value to apply for the filter
Returns:
Dictionary with Couchbase specific search operation.
"""
if operator == "!=":
return {"must_not": {"disjuncts": [{"field": field, "match": value}]}}
elif operator == "==":
return {"field": field, "match": value}
elif operator == ">":
return {"min": value, "inclusive_min": False, "field": field}
elif operator == "<":
return {"max": value, "inclusive_max": False, "field": field}
elif operator == ">=":
return {"min": value, "inclusive_min": True, "field": field}
elif operator == "<=":
return {"max": value, "inclusive_max": True, "field": field}
elif operator == "text_match":
return {"match_phrase": value, "field": field}
else:
raise ValueError(f"Filter operator {operator} not supported")
def _to_couchbase_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""
Convert standard filters to Couchbase filter.
Args:
standard_filters (str): Standard Llama-index filters.
Returns:
Dictionary with Couchbase search query.
"""
filters = {}
filters_list = []
condition = standard_filters.condition
condition = _transform_couchbase_filter_condition(condition)
if standard_filters.filters:
for filter in standard_filters.filters:
if filter.operator:
transformed_filter = _transform_couchbase_filter_operator(
filter.operator, f"metadata.{filter.key}", filter.value
)
filters_list.append(transformed_filter)
else:
filters_list.append(
{
"match": {
"field": f"metadata.{filter.key}",
"value": filter.value,
}
}
)
if len(filters_list) == 1:
# If there is only one filter, return it directly
return filters_list[0]
elif len(filters_list) > 1:
filters[condition] = filters_list
return {"query": filters}
def _convert_llamaindex_filters_to_sql(
filters: MetadataFilters, metadata_key: str
) -> str:
"""
Convert LlamaIndex MetadataFilters to SQL++ WHERE clause.
Args:
filters: LlamaIndex MetadataFilters object
metadata_key: The metadata field prefix for the document
Returns:
SQL++ WHERE clause string
"""
if not filters or not filters.filters:
return ""
def _build_condition(filter_item: Any) -> str:
"""Build a single SQL++ condition from a MetadataFilter."""
field_name = f"d.{metadata_key}.{filter_item.key}"
if filter_item.operator == FilterOperator.EQ:
if isinstance(filter_item.value, str):
return f"{field_name} = '{filter_item.value}'"
else:
return f"{field_name} = {filter_item.value}"
elif filter_item.operator == FilterOperator.NE:
if isinstance(filter_item.value, str):
return f"{field_name} != '{filter_item.value}'"
else:
return f"{field_name} != {filter_item.value}"
elif filter_item.operator == FilterOperator.GT:
return f"{field_name} > {filter_item.value}"
elif filter_item.operator == FilterOperator.GTE:
return f"{field_name} >= {filter_item.value}"
elif filter_item.operator == FilterOperator.LT:
return f"{field_name} < {filter_item.value}"
elif filter_item.operator == FilterOperator.LTE:
return f"{field_name} <= {filter_item.value}"
elif filter_item.operator == FilterOperator.IN:
if isinstance(filter_item.value, list):
values = ", ".join(
[
f"'{v}'" if isinstance(v, str) else str(v)
for v in filter_item.value
]
)
return f"{field_name} IN [{values}]"
else:
raise ValueError(
f"'in' operator expects a list value, got {type(filter_item.value)}"
)
else:
raise ValueError(f"Unsupported filter operator: {filter_item.operator}")
# Build conditions for all filters
filter_conditions = []
for filter_item in filters.filters:
if isinstance(filter_item, MetadataFilter):
condition = _build_condition(filter_item)
filter_conditions.append(condition)
elif isinstance(filter_item, MetadataFilters):
condition = (
"("
+ _convert_llamaindex_filters_to_sql(filter_item, metadata_key)
+ ")"
)
filter_conditions.append(condition)
else:
logger.warning(f"Unsupported filter type: {type(filter_item)}")
continue
if not filter_conditions:
return ""
# Join conditions based on the filter condition (AND/OR)
condition_connector = " AND " if filters.condition == "and" else " OR "
return condition_connector.join(filter_conditions)
| QueryVectorSearchSimilarity |
python | huggingface__transformers | src/transformers/models/cpmant/modeling_cpmant.py | {
"start": 16682,
"end": 20802
} | class ____(nn.Module):
def __init__(self, config: CpmAntConfig):
super().__init__()
self.num_heads = config.num_attention_heads
self.num_buckets = config.position_bias_num_buckets
self.max_distance = config.position_bias_max_distance
self.num_segments = config.segment_types
self.relative_attention_bias = nn.Parameter(
torch.empty(
config.segment_types * config.segment_types + config.position_bias_num_buckets,
config.num_attention_heads,
)
)
def forward(
self,
key_pos: torch.Tensor,
query_pos: torch.Tensor,
key_segment: torch.Tensor,
query_segment: torch.Tensor,
):
with torch.no_grad():
batch = key_pos.size(0)
keylen = key_pos.size(1)
querylen = query_pos.size(1)
if key_pos.size(0) != query_pos.size(0):
raise AssertionError(
f"key_pos.size(0) should be equal to query_pos.size(0), but got {key_pos.size(0)} and {query_pos.size(0)}!"
)
if keylen != key_segment.size(1) or querylen != query_segment.size(1):
raise AssertionError(
f"keylen should be equal to key_segment.size(1), but got {keylen} and {key_segment.size(1)}!"
)
if querylen != query_segment.size(1):
raise AssertionError(
f"querylen should be equal to query_segment.size(1), but got {querylen} and {query_segment.size(1)}!"
)
key_pos = key_pos.view(batch, -1, keylen)
query_pos = query_pos.view(batch, querylen, -1)
key_segment = key_segment.view(batch, -1, keylen)
query_segment = query_segment.view(batch, querylen, -1)
relative_position_bucket = self._segment_relative_position_bucket(query_segment, key_segment)
relative_position_bucket = relative_position_bucket + self.num_buckets
# (batch, len_q, len_k)
absolute_position_bucket = self._position_bucket(
torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[None, :]
- torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[:, None],
num_buckets=self.num_buckets,
max_distance=self.max_distance,
)
relative_position_bucket = torch.where(
(key_segment == query_segment),
absolute_position_bucket[None, :, :],
relative_position_bucket,
)
# (batch, len_q, len_k, num_heads)
embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
# (batch, num_heads, len_q, len_k)
embeds = embeds.permute(0, 3, 1, 2).contiguous()
return embeds
def _segment_relative_position_bucket(self, query_segment, key_segment):
return query_segment * self.num_segments + key_segment
def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
relative_buckets = 0
# always bidirectional in CPMAnt
num_buckets //= 2
relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
relative_position = torch.abs(relative_position)
max_exact = num_buckets // 2
is_small = relative_position < max_exact
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.int32)
relative_position_if_large = torch.min(
relative_position_if_large,
torch.full_like(relative_position_if_large, num_buckets - 1),
)
relative_buckets += torch.where(is_small, relative_position.to(torch.int32), relative_position_if_large)
return relative_buckets
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->CPMAnt
| CpmAntSegmentPositionEmbedding |
python | pypa__pip | src/pip/_vendor/packaging/requirements.py | {
"start": 482,
"end": 607
} | class ____(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
| InvalidRequirement |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 39516,
"end": 40151
} | class ____(VOWarning, ValueError):
r"""Invalid boolean value.
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ("x",)
| E05 |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 3571,
"end": 4707
} | class ____(Benchmark):
r"""
Michalewicz objective function.
This class defines the Michalewicz [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Michalewicz}}(x) = - \sum_{i=1}^{2} \sin\left(x_i\right)
\sin^{2 m}\left(\frac{i x_i^{2}}{\pi}\right)
Where, in this exercise, :math:`m = 10`.
with :math:`x_i \in [0, \pi]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = -1.8013` for :math:`x = [0, 0]`
.. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
Unconstrained Global Optimization", 2005
TODO: could change dimensionality, but global minimum might change.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [pi] * self.N))
self.global_optimum = [[2.20290555, 1.570796]]
self.fglob = -1.8013
def fun(self, x, *args):
self.nfev += 1
m = 10.0
i = arange(1, self.N + 1)
return -sum(sin(x) * sin(i * x ** 2 / pi) ** (2 * m))
| Michalewicz |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/image_ops/decode_png_op_test.py | {
"start": 1022,
"end": 1981
} | class ____(test.TestCase):
def test16bit(self):
img_bytes = [[0, 255], [1024, 1024 + 255]]
# Encoded PNG bytes resulting from encoding the above img_bytes
# using go's image/png encoder.
encoded_bytes = [
137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 0,
2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142, 187, 0, 0, 0, 21, 73, 68, 65,
84, 120, 156, 98, 98, 96, 96, 248, 207, 194, 2, 36, 1, 1, 0, 0, 255,
255, 6, 60, 1, 10, 68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174,
66, 96, 130
]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = array_ops.squeeze(
image_ops.decode_png(
img_in, dtype=dtypes.uint16))
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
if __name__ == "__main__":
test.main()
| DecodePngOpTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 224439,
"end": 224868
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "job_status_id", "project")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
job_status_id = sgqlc.types.Field(String, graphql_name="jobStatusId")
project = sgqlc.types.Field("Project", graphql_name="project")
| CloneProjectPayload |
python | doocs__leetcode | solution/3300-3399/3372.Maximize the Number of Target Nodes After Connecting Trees I/Solution.py | {
"start": 0,
"end": 864
} | class ____:
def maxTargetNodes(
self, edges1: List[List[int]], edges2: List[List[int]], k: int
) -> List[int]:
def build(edges: List[List[int]]) -> List[List[int]]:
n = len(edges) + 1
g = [[] for _ in range(n)]
for a, b in edges:
g[a].append(b)
g[b].append(a)
return g
def dfs(g: List[List[int]], a: int, fa: int, d: int) -> int:
if d < 0:
return 0
cnt = 1
for b in g[a]:
if b != fa:
cnt += dfs(g, b, a, d - 1)
return cnt
g2 = build(edges2)
m = len(edges2) + 1
t = max(dfs(g2, i, -1, k - 1) for i in range(m))
g1 = build(edges1)
n = len(edges1) + 1
return [dfs(g1, i, -1, k) + t for i in range(n)]
| Solution |
python | python__mypy | mypy/types.py | {
"start": 42407,
"end": 46188
} | class ____(ProperType):
"""The type 'Any'."""
__slots__ = ("type_of_any", "source_any", "missing_import_name")
def __init__(
self,
type_of_any: int,
source_any: AnyType | None = None,
missing_import_name: str | None = None,
line: int = -1,
column: int = -1,
) -> None:
super().__init__(line, column)
self.type_of_any = type_of_any
# If this Any was created as a result of interacting with another 'Any', record the source
# and use it in reports.
self.source_any = source_any
if source_any and source_any.source_any:
self.source_any = source_any.source_any
if source_any is None:
self.missing_import_name = missing_import_name
else:
self.missing_import_name = source_any.missing_import_name
# Only unimported type anys and anys from other anys should have an import name
assert missing_import_name is None or type_of_any in (
TypeOfAny.from_unimported_type,
TypeOfAny.from_another_any,
)
# Only Anys that come from another Any can have source_any.
assert type_of_any != TypeOfAny.from_another_any or source_any is not None
# We should not have chains of Anys.
assert not self.source_any or self.source_any.type_of_any != TypeOfAny.from_another_any
@property
def is_from_error(self) -> bool:
return self.type_of_any == TypeOfAny.from_error
def accept(self, visitor: TypeVisitor[T]) -> T:
return visitor.visit_any(self)
def copy_modified(
self,
# Mark with Bogus because _dummy is just an object (with type Any)
type_of_any: int = _dummy_int,
original_any: Bogus[AnyType | None] = _dummy,
missing_import_name: Bogus[str | None] = _dummy,
) -> AnyType:
if type_of_any == _dummy_int:
type_of_any = self.type_of_any
if original_any is _dummy:
original_any = self.source_any
if missing_import_name is _dummy:
missing_import_name = self.missing_import_name
return AnyType(
type_of_any=type_of_any,
source_any=original_any,
missing_import_name=missing_import_name,
line=self.line,
column=self.column,
)
def __hash__(self) -> int:
return hash(AnyType)
def __eq__(self, other: object) -> bool:
return isinstance(other, AnyType)
def serialize(self) -> JsonDict:
return {
".class": "AnyType",
"type_of_any": self.type_of_any,
"source_any": self.source_any.serialize() if self.source_any is not None else None,
"missing_import_name": self.missing_import_name,
}
@classmethod
def deserialize(cls, data: JsonDict) -> AnyType:
assert data[".class"] == "AnyType"
source = data["source_any"]
return AnyType(
data["type_of_any"],
AnyType.deserialize(source) if source is not None else None,
data["missing_import_name"],
)
def write(self, data: WriteBuffer) -> None:
write_tag(data, ANY_TYPE)
write_type_opt(data, self.source_any)
write_int(data, self.type_of_any)
write_str_opt(data, self.missing_import_name)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> AnyType:
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == ANY_TYPE
source_any = AnyType.read(data)
else:
source_any = None
ret = AnyType(read_int(data), source_any, read_str_opt(data))
assert read_tag(data) == END_TAG
return ret
| AnyType |
python | aimacode__aima-python | deep_learning4e.py | {
"start": 2584,
"end": 2718
} | class ____(Activation):
def function(self, x):
return x
def derivative(self, x):
return np.ones_like(x)
| Linear |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 22665,
"end": 24295
} | class ____(unittest.TestCase):
"""Tests person in the es_CO locale"""
def setUp(self):
self.fake = Faker("es_CO")
Faker.seed(0)
def test_first_names(self):
# General first name
name = self.fake.first_name()
self.assertIsInstance(name, str)
assert name in EsCOProvider.first_names
# Female first name
name = self.fake.first_name_female()
self.assertIsInstance(name, str)
assert name in EsCOProvider.first_names
assert name in EsCOProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
self.assertIsInstance(name, str)
assert name in EsCOProvider.first_names
assert name in EsCOProvider.first_names_male
def test_last_names(self):
# General last name
name = self.fake.last_name()
self.assertIsInstance(name, str)
assert name in EsCOProvider.last_names
# Female last name
name = self.fake.last_name_female()
self.assertIsInstance(name, str)
assert name in EsCOProvider.last_names
# Male last name
name = self.fake.last_name_male()
self.assertIsInstance(name, str)
assert name in EsCOProvider.last_names
def test_prefix(self):
# Female prefix
prefix = self.fake.prefix_female()
self.assertIsInstance(prefix, str)
assert prefix in EsCOProvider.prefixes_female
# Male prefix
prefix = self.fake.prefix_male()
self.assertIsInstance(prefix, str)
assert prefix in EsCOProvider.prefixes_male
| TestEsCO |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 14836,
"end": 20833
} | class ____(DashboardComponent, MemoryColor):
"""Memory usage for single workers"""
@log_errors
def __init__(self, scheduler, width=600, **kwargs):
DashboardComponent.__init__(self)
MemoryColor.__init__(self)
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"width": [],
"x": [],
"y": [],
"color": [],
"alpha": [],
"worker": [],
"escaped_worker": [],
"proc_memory": [],
"managed": [],
"unmanaged_old": [],
"unmanaged_recent": [],
"spilled": [],
}
)
self.root = figure(
title="Bytes stored per worker",
tools="",
width=int(width / 2),
name="workers_memory",
min_border_bottom=50,
**kwargs,
)
rect = self.root.rect(
source=self.source,
x="x",
y="y",
width="width",
height=0.9,
color="color",
fill_alpha="alpha",
line_width=0,
)
rect.nonselection_glyph = None
self.root.axis[0].ticker = BasicTicker(**TICKS_1024)
self.root.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION
self.root.xaxis.minor_tick_line_alpha = 0
self.root.x_range = Range1d(start=0)
self.root.yaxis.visible = False
self.root.ygrid.visible = False
self.root.toolbar_location = None
tap = TapTool(callback=OpenURL(url="./info/worker/@escaped_worker.html"))
self.root.add_tools(tap)
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 12px; font-weight: bold;">Worker:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@worker</span>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Process memory (RSS):</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@proc_memory{0.00 b}</span>
</div>
<div style="margin-left: 1em;">
<span style="font-size: 12px; font-weight: bold;">Managed:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@managed{0.00 b}</span>
</div>
<div style="margin-left: 1em;">
<span style="font-size: 12px; font-weight: bold;">Unmanaged (old):</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@unmanaged_old{0.00 b}</span>
</div>
<div style="margin-left: 1em;">
<span style="font-size: 12px; font-weight: bold;">Unmanaged (recent):</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@unmanaged_recent{0.00 b}</span>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Spilled to disk:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@spilled{0.00 b}</span>
</div>
""",
)
self.root.add_tools(hover)
@without_property_validation
@log_errors
def update(self):
def quadlist(i: Iterable[T]) -> list[T]:
out = []
for ii in i:
out += [ii, ii, ii, ii]
return out
workers = self.scheduler.workers.values()
width = []
x = []
color = []
max_limit = 0
procmemory = []
managed = []
spilled = []
unmanaged_old = []
unmanaged_recent = []
for ws in workers:
meminfo = ws.memory
limit = getattr(ws, "memory_limit", 0)
max_limit = max(max_limit, limit, meminfo.process + meminfo.spilled)
color_i = self._memory_color(meminfo.process, limit, ws.status)
width += [
meminfo.managed,
meminfo.unmanaged_old,
meminfo.unmanaged_recent,
meminfo.spilled,
]
x += [sum(width[-4:i]) + width[i] / 2 for i in range(-4, 0)]
color += [color_i, color_i, color_i, "grey"]
# memory info
procmemory.append(meminfo.process)
managed.append(meminfo.managed)
unmanaged_old.append(meminfo.unmanaged_old)
unmanaged_recent.append(meminfo.unmanaged_recent)
spilled.append(meminfo.spilled)
result = {
"width": width,
"x": x,
"color": color,
"alpha": [1, 0.7, 0.4, 1] * len(workers),
"worker": quadlist(ws.address for ws in workers),
"escaped_worker": quadlist(url_escape(ws.address) for ws in workers),
"y": quadlist(range(len(workers))),
"proc_memory": quadlist(procmemory),
"managed": quadlist(managed),
"unmanaged_old": quadlist(unmanaged_old),
"unmanaged_recent": quadlist(unmanaged_recent),
"spilled": quadlist(spilled),
}
# Remove rectangles with width=0
result = {k: [vi for vi, w in zip(v, width) if w] for k, v in result.items()}
self.root.x_range.end = max_limit
update(self.source, result)
| WorkersMemory |
python | bokeh__bokeh | tests/unit/bokeh/models/widgets/test_slider.py | {
"start": 1947,
"end": 2866
} | class ____:
def test_value_and_value_throttled(self) -> None:
s0 = mws.RangeSlider(start=0, end=10)
with pytest.raises(UnsetValueError):
s0.value
with pytest.raises(UnsetValueError):
s0.value_throttled
s1 = mws.RangeSlider(start=0, end=10, value=(4, 6))
assert s1.value == (4, 6)
assert s1.value_throttled == (4, 6)
def test_rangeslider_equal_start_end_validation(self, caplog: pytest.LogCaptureFixture) -> None:
start = 0
end = 10
s = mws.RangeSlider(start=start, end=end)
#with caplog.at_level(logging.ERROR, logger='bokeh.core.validation.check'):
with caplog.at_level(logging.ERROR):
assert len(caplog.records) == 0
s.end = 0
issues = check_integrity([s])
process_validation_issues(issues)
assert len(caplog.records) == 1
| TestRangeSlider |
python | getsentry__sentry | src/sentry/integrations/web/vsts_extension_configuration.py | {
"start": 237,
"end": 1618
} | class ____(IntegrationExtensionConfigurationView):
provider = IntegrationProviderSlug.AZURE_DEVOPS.value
external_provider_key = "vsts-extension"
def _is_valid_account_name(self, account_name: str) -> bool:
"""Validates the Azure DevOps account name
https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/rename-organization?view=azure-devops#rename-your-organization
> Adhere to the following guidelines when you create an organization name.
>
> Use only letters from the English alphabet
> Start your organization name with a letter or number
> Use letters, numbers, or hyphens after the initial character
> Ensure that your organization doesn't exceed 50 Unicode characters
> End with a letter or number
"""
pattern = r"^[A-Za-z0-9][A-Za-z0-9-]{0,48}[A-Za-z0-9]$"
return bool(re.match(pattern, account_name))
def map_params_to_state(self, params):
for param in ["targetId", "targetName"]:
if param not in params:
raise ValueError(f"Missing required {param} parameter")
if not self._is_valid_account_name(params["targetName"]):
raise ValueError("Invalid targetName parameter")
return {"accountId": params["targetId"], "accountName": params["targetName"]}
| VstsExtensionConfigurationView |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial001.py | {
"start": 120,
"end": 359
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
heroes: List["Hero"] = Relationship(back_populates="team", cascade_delete=True)
| Team |
python | pyparsing__pyparsing | pyparsing/util.py | {
"start": 2704,
"end": 3214
} | class ____:
def __init__(self):
cache = {}
cache_get = cache.get
self.not_in_cache = not_in_cache = object()
def get(_, key):
return cache_get(key, not_in_cache)
def set_(_, key, value):
cache[key] = value
def clear(_):
cache.clear()
self.size = None
self.get = types.MethodType(get, self)
self.set = types.MethodType(set_, self)
self.clear = types.MethodType(clear, self)
| _UnboundedCache |
python | apache__airflow | providers/databricks/tests/unit/databricks/hooks/test_databricks.py | {
"start": 64810,
"end": 66840
} | class ____:
"""
Tests for DatabricksHook when auth is done with AAD leveraging Managed Identity authentication
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login=None,
password=None,
extra=json.dumps(
{
"use_azure_managed_identity": True,
}
),
)
)
self.hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
@mock.patch.object(azure.identity, "ManagedIdentityCredential")
def test_submit_run(self, mock_azure_identity, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.side_effect = [
create_successful_response_mock({"compute": {"azEnvironment": "AZUREPUBLICCLOUD"}}),
]
mock_requests.post.side_effect = [
create_successful_response_mock({"run_id": "1"}),
]
mock_azure_identity().get_token.return_value = create_aad_token_for_resource()
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {"notebook_task": NOTEBOOK_TASK, "new_cluster": NEW_CLUSTER}
run_id = self.hook.submit_run(data)
ad_call_args = mock_requests.method_calls[0]
assert ad_call_args[1][0] == AZURE_METADATA_SERVICE_INSTANCE_URL
assert ad_call_args[2]["params"]["api-version"] > "2018-02-01"
assert ad_call_args[2]["headers"]["Metadata"] == "true"
assert run_id == "1"
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs["auth"].token == TOKEN
@pytest.mark.db_test
| TestDatabricksHookAadTokenManagedIdentity |
python | huggingface__transformers | src/transformers/models/gemma3n/modular_gemma3n.py | {
"start": 103313,
"end": 103431
} | class ____(Gemma3ForCausalLM):
_checkpoint_conversion_mapping = {"model.language_model": "model"}
| Gemma3nForCausalLM |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_pii.py | {
"start": 11387,
"end": 17641
} | class ____:
"""Test PIIMiddleware integration with agent."""
def test_apply_to_input_only(self):
"""Test that middleware only processes input when configured."""
middleware = PIIMiddleware(
"email", strategy="redact", apply_to_input=True, apply_to_output=False
) # noqa: E501
# Should process HumanMessage
state = {"messages": [HumanMessage("Email: test@example.com")]}
result = middleware.before_model(state, None)
assert result is not None
assert "[REDACTED_EMAIL]" in result["messages"][0].content
# Should not process AIMessage
state = {"messages": [AIMessage("My email is ai@example.com")]}
result = middleware.after_model(state, None)
assert result is None
def test_apply_to_output_only(self):
"""Test that middleware only processes output when configured."""
middleware = PIIMiddleware(
"email", strategy="redact", apply_to_input=False, apply_to_output=True
) # noqa: E501
# Should not process HumanMessage
state = {"messages": [HumanMessage("Email: test@example.com")]}
result = middleware.before_model(state, None)
assert result is None
# Should process AIMessage
state = {"messages": [AIMessage("My email is ai@example.com")]}
result = middleware.after_model(state, None)
assert result is not None
assert "[REDACTED_EMAIL]" in result["messages"][0].content
def test_apply_to_both(self):
"""Test that middleware processes both input and output."""
middleware = PIIMiddleware(
"email", strategy="redact", apply_to_input=True, apply_to_output=True
) # noqa: E501
# Should process HumanMessage
state = {"messages": [HumanMessage("Email: test@example.com")]}
result = middleware.before_model(state, None)
assert result is not None
# Should process AIMessage
state = {"messages": [AIMessage("My email is ai@example.com")]}
result = middleware.after_model(state, None)
assert result is not None
def test_no_pii_returns_none(self):
"""Test that middleware returns None when no PII detected."""
middleware = PIIMiddleware("email", strategy="redact")
state = {"messages": [HumanMessage("No PII here")]}
result = middleware.before_model(state, None)
assert result is None
def test_empty_messages(self):
"""Test that middleware handles empty messages gracefully."""
middleware = PIIMiddleware("email", strategy="redact")
state = {"messages": []}
result = middleware.before_model(state, None)
assert result is None
def test_apply_to_tool_results(self):
"""Test that middleware processes tool results when enabled."""
middleware = PIIMiddleware(
"email", strategy="redact", apply_to_input=False, apply_to_tool_results=True
)
# Simulate a conversation with tool call and result containing PII
state = {
"messages": [
HumanMessage("Search for John"),
AIMessage(
content="",
tool_calls=[ToolCall(name="search", args={}, id="call_123", type="tool_call")],
),
ToolMessage(content="Found: john@example.com", tool_call_id="call_123"),
]
}
result = middleware.before_model(state, None)
assert result is not None
# Check that the tool message was redacted
tool_msg = result["messages"][2]
assert isinstance(tool_msg, ToolMessage)
assert "[REDACTED_EMAIL]" in tool_msg.content
assert "john@example.com" not in tool_msg.content
def test_apply_to_tool_results_mask_strategy(self):
"""Test that mask strategy works for tool results."""
middleware = PIIMiddleware(
"ip", strategy="mask", apply_to_input=False, apply_to_tool_results=True
)
state = {
"messages": [
HumanMessage("Get server IP"),
AIMessage(
content="",
tool_calls=[ToolCall(name="get_ip", args={}, id="call_456", type="tool_call")],
),
ToolMessage(content="Server IP: 192.168.1.100", tool_call_id="call_456"),
]
}
result = middleware.before_model(state, None)
assert result is not None
tool_msg = result["messages"][2]
assert "*.*.*.100" in tool_msg.content
assert "192.168.1.100" not in tool_msg.content
def test_apply_to_tool_results_block_strategy(self):
"""Test that block strategy raises error for PII in tool results."""
middleware = PIIMiddleware(
"email", strategy="block", apply_to_input=False, apply_to_tool_results=True
)
state = {
"messages": [
HumanMessage("Search for user"),
AIMessage(
content="",
tool_calls=[ToolCall(name="search", args={}, id="call_789", type="tool_call")],
),
ToolMessage(content="User email: sensitive@example.com", tool_call_id="call_789"),
]
}
with pytest.raises(PIIDetectionError) as exc_info:
middleware.before_model(state, None)
assert exc_info.value.pii_type == "email"
assert len(exc_info.value.matches) == 1
def test_with_agent(self):
"""Test PIIMiddleware integrated with create_agent."""
model = FakeToolCallingModel(responses=[AIMessage(content="Thanks for sharing!")])
agent = create_agent(
model=model,
middleware=[PIIMiddleware("email", strategy="redact")],
)
# Invoke (agent is already compiled)
result = agent.invoke({"messages": [HumanMessage("Email: test@example.com")]})
# Check that email was redacted in the stored messages
# The first message should have been processed
messages = result["messages"]
assert any("[REDACTED_EMAIL]" in str(msg.content) for msg in messages)
| TestPIIMiddlewareIntegration |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ec2.py | {
"start": 7048,
"end": 8582
} | class ____(BaseEc2TestClass):
def test_init(self):
ec2_operator = EC2StopInstanceOperator(
task_id="task_test",
instance_id="i-123abc",
aws_conn_id="aws_conn_test",
region_name="region-test",
check_interval=3,
)
assert ec2_operator.task_id == "task_test"
assert ec2_operator.instance_id == "i-123abc"
assert ec2_operator.aws_conn_id == "aws_conn_test"
assert ec2_operator.region_name == "region-test"
assert ec2_operator.check_interval == 3
@mock_aws
def test_stop_instance(self):
# create instance
ec2_hook = EC2Hook()
create_instance = EC2CreateInstanceOperator(
image_id=self._get_image_id(ec2_hook),
task_id="test_create_instance",
)
instance_id = create_instance.execute(None)
# stop instance
stop_test = EC2StopInstanceOperator(
task_id="stop_test",
instance_id=instance_id[0],
)
stop_test.execute(None)
# assert instance state is stopped
assert ec2_hook.get_instance_state(instance_id=instance_id[0]) == "stopped"
def test_template_fields(self):
ec2_operator = EC2StopInstanceOperator(
task_id="task_test",
instance_id="i-123abc",
aws_conn_id="aws_conn_test",
region_name="region-test",
check_interval=3,
)
validate_template_fields(ec2_operator)
| TestEC2StopInstanceOperator |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/rule.py | {
"start": 5445,
"end": 5926
} | class ____(serializers.Serializer):
name = serializers.CharField(
max_length=256, required=False, allow_null=True, allow_blank=True, default="Test Alert"
)
actions = serializers.ListField(child=RuleNodeField(type="action/event"), required=False)
def validate_name(self, name):
if name == "" or name is None:
return "Test Alert"
return name
def validate(self, attrs):
return validate_actions(attrs)
| DummyRuleSerializer |
python | has2k1__plotnine | plotnine/_mpl/text.py | {
"start": 430,
"end": 6571
} | class ____(Text):
"""
Strip Text
"""
draw_info: strip_draw_info
patch: StripTextPatch
def __init__(self, info: strip_draw_info):
kwargs = {
"rotation": info.rotation,
"transform": info.ax.transAxes,
"clip_on": False,
"zorder": 3.3,
# Since the text can be rotated, it is simpler to anchor it at
# the center, align it, then do the rotation. Vertically,
# center_baseline places the text in the visual center, but
# only if it is one line. For multiline text, we are better
# off with plain center.
"ha": "center",
"va": "center_baseline" if info.is_oneline else "center",
"rotation_mode": "anchor",
}
super().__init__(0, 0, info.label, **kwargs)
self.draw_info = info
self.patch = StripTextPatch(self)
# TODO: This should really be part of the unit conversions in the
# margin class.
@lru_cache(2)
def _line_height(self, renderer) -> float:
"""
The line height in display space of the text on the canvas
"""
# Text string, (width, height), x, y
parts: list[tuple[str, tuple[float, float], float, float]]
try:
# matplotlib.Text._get_layout is a private API and we cannot
# tell how using it may fail in the future.
_, parts, _ = self._get_layout(renderer) # pyright: ignore[reportAttributeAccessIssue]
except Exception:
from warnings import warn
from plotnine.exceptions import PlotnineWarning
# The canvas height is nearly always bigger than the stated
# fontsize. 1.36 is a good multiplication factor obtained by
# some rough exploration
f = 1.36
size = self.get_fontsize()
height = round(size * f) if isinstance(size, int) else 14
warn(
f"Could not calculate line height for {self.get_text()}. "
"Using an estimate, please let us know about this at "
"https://github.com/has2k1/plotnine/issues",
PlotnineWarning,
)
else:
# If the text has multiple lines, we use the maximum height
# of anyone single line.
height = max([p[1][1] for p in parts])
return height
def _set_position(self, renderer):
"""
Set the position of the text within the strip_background
"""
# We have two premises that depend on each other:
#
# 1. The breadth of the strip_background grows to accommodate
# the strip_text.
# 2. The strip_text is justified within the strip_background.
#
# From these we note that the strip_background does not need the
# position of the strip_text, but it needs its size. Therefore
# we implement StripTextPatch.get_window_extent so that it can use
# StripText.get_window_extent, peeking only at the size.
#
# And we implement StripText._set_position_* to use
# StripTextPatch.get_window_extent and make the calculations in
# both methods independent.
if self.draw_info.position == "top":
self._set_position_top(renderer)
else: # "right"
self._set_position_right(renderer)
def _set_position_top(self, renderer):
"""
Set position of the text within the top strip_background
"""
info = self.draw_info
ha, va, ax, m = info.ha, info.va, info.ax, info.margin
rel_x, rel_y = ha_as_float(ha), va_as_float(va)
patch_bbox = bbox_in_axes_space(self.patch, ax, renderer)
text_bbox = bbox_in_axes_space(self, ax, renderer)
# line_height and margins in axes space
line_height = self._line_height(renderer) / ax.bbox.height
x = (
# Justify horizontally within the strip_background
rel_position(
rel_x,
text_bbox.width + (line_height * (m.l + m.r)),
patch_bbox.x0,
patch_bbox.x1,
)
+ (m.l * line_height)
+ text_bbox.width / 2
)
# Setting the y position based on the bounding box is wrong
y = (
rel_position(
rel_y,
text_bbox.height,
patch_bbox.y0 + m.b * line_height,
patch_bbox.y1 - m.t * line_height,
)
+ text_bbox.height / 2
)
self.set_position((x, y))
def _set_position_right(self, renderer):
"""
Set position of the text within the right strip_background
"""
info = self.draw_info
ha, va, ax, m = info.ha, info.va, info.ax, info.margin
# bboxes in axes space
patch_bbox = bbox_in_axes_space(self.patch, ax, renderer)
text_bbox = bbox_in_axes_space(self, ax, renderer)
# line_height in axes space
line_height = self._line_height(renderer) / ax.bbox.width
rel_x, rel_y = ha_as_float(ha), va_as_float(va)
x = (
rel_position(
rel_x,
text_bbox.width,
patch_bbox.x0 + m.l * line_height,
patch_bbox.x1 - m.r * line_height,
)
+ text_bbox.width / 2
)
y = (
# Justify vertically within the strip_background
rel_position(
rel_y,
text_bbox.height + ((m.b + m.t) * line_height),
patch_bbox.y0,
patch_bbox.y1,
)
+ (m.b * line_height)
+ text_bbox.height / 2
)
self.set_position((x, y))
@artist.allow_rasterization
def draw(self, renderer: RendererBase):
"""
Draw text along with the patch
"""
if not self.get_visible():
return
self._set_position(renderer)
self.patch.draw(renderer)
return super().draw(renderer)
| StripText |
python | Textualize__textual | docs/examples/widgets/select_widget_no_blank.py | {
"start": 517,
"end": 1053
} | class ____(App):
CSS_PATH = "select.tcss"
BINDINGS = [("s", "swap", "Swap Select options")]
def compose(self) -> ComposeResult:
yield Header()
yield Select(zip(LINES, LINES), allow_blank=False)
@on(Select.Changed)
def select_changed(self, event: Select.Changed) -> None:
self.title = str(event.value)
def action_swap(self) -> None:
self.query_one(Select).set_options(zip(ALTERNATE_LINES, ALTERNATE_LINES))
if __name__ == "__main__":
app = SelectApp()
app.run()
| SelectApp |
python | aimacode__aima-python | learning4e.py | {
"start": 31176,
"end": 38738
} | class ____:
"""An ensemble of Decision Trees trained using bagging and feature bagging."""
def __init__(self, dataset, n=5):
self.dataset = dataset
self.n = n
self.predictors = [DecisionTreeLearner(DataSet(examples=self.data_bagging(), attrs=self.dataset.attrs,
attr_names=self.dataset.attr_names, target=self.dataset.target,
inputs=self.feature_bagging())) for _ in range(self.n)]
def data_bagging(self, m=0):
"""Sample m examples with replacement"""
n = len(self.dataset.examples)
return weighted_sample_with_replacement(m or n, self.dataset.examples, [1] * n)
def feature_bagging(self, p=0.7):
"""Feature bagging with probability p to retain an attribute"""
inputs = [i for i in self.dataset.inputs if probability(p)]
return inputs or self.dataset.inputs
def predict(self, example):
return mode(predictor.predict(example) for predictor in self.predictors)
def WeightedLearner(unweighted_learner):
"""
[Page 749 footnote 14]
Given a learner that takes just an unweighted dataset, return
one that takes also a weight for each example.
"""
def train(dataset, weights):
dataset = replicated_dataset(dataset, weights)
n_samples, n_features = len(dataset.examples), dataset.target
X, y = (np.array([x[:n_features] for x in dataset.examples]),
np.array([x[n_features] for x in dataset.examples]))
return unweighted_learner.fit(X, y)
return train
def replicated_dataset(dataset, weights, n=None):
"""Copy dataset, replicating each example in proportion to its weight."""
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
def weighted_replicate(seq, weights, n):
"""
Return n selections from seq, with the count of each element of
seq proportional to the corresponding weight (filling in fractions
randomly).
>>> weighted_replicate('ABC', [1, 2, 1], 4)
['A', 'B', 'B', 'C']
"""
assert len(seq) == len(weights)
weights = normalize(weights)
wholes = [int(w * n) for w in weights]
fractions = [(w * n) % 1 for w in weights]
return (flatten([x] * nx for x, nx in zip(seq, wholes)) +
weighted_sample_with_replacement(n - sum(wholes), seq, fractions))
# metrics
def accuracy_score(y_pred, y_true):
assert y_pred.shape == y_true.shape
return np.mean(np.equal(y_pred, y_true))
def r2_score(y_pred, y_true):
assert y_pred.shape == y_true.shape
return 1. - (np.sum(np.square(y_pred - y_true)) / # sum of square of residuals
np.sum(np.square(y_true - np.mean(y_true)))) # total sum of squares
# datasets
orings = DataSet(name='orings', target='Distressed', attr_names='Rings Distressed Temp Pressure Flightnum')
zoo = DataSet(name='zoo', target='type', exclude=['name'],
attr_names='name hair feathers eggs milk airborne aquatic predator toothed backbone '
'breathes venomous fins legs tail domestic catsize type')
iris = DataSet(name='iris', target='class', attr_names='sepal-len sepal-width petal-len petal-width class')
def RestaurantDataSet(examples=None):
"""
[Figure 18.3]
Build a DataSet of Restaurant waiting examples.
"""
return DataSet(name='restaurant', target='Wait', examples=examples,
attr_names='Alternate Bar Fri/Sat Hungry Patrons Price Raining Reservation Type WaitEstimate Wait')
restaurant = RestaurantDataSet()
def T(attr_name, branches):
branches = {value: (child if isinstance(child, DecisionFork) else DecisionLeaf(child))
for value, child in branches.items()}
return DecisionFork(restaurant.attr_num(attr_name), attr_name, print, branches)
"""
[Figure 18.2]
A decision tree for deciding whether to wait for a table at a hotel.
"""
waiting_decision_tree = T('Patrons',
{'None': 'No', 'Some': 'Yes',
'Full': T('WaitEstimate',
{'>60': 'No', '0-10': 'Yes',
'30-60': T('Alternate',
{'No': T('Reservation',
{'Yes': 'Yes',
'No': T('Bar', {'No': 'No',
'Yes': 'Yes'})}),
'Yes': T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}),
'10-30': T('Hungry',
{'No': 'Yes',
'Yes': T('Alternate',
{'No': 'Yes',
'Yes': T('Raining',
{'No': 'No',
'Yes': 'Yes'})})})})})
def SyntheticRestaurant(n=20):
"""Generate a DataSet with n examples."""
def gen():
example = list(map(random.choice, restaurant.values))
example[restaurant.target] = waiting_decision_tree(example)
return example
return RestaurantDataSet([gen() for _ in range(n)])
def Majority(k, n):
"""
Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0.
"""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for _ in range(k)]
bits.append(int(sum(bits) > k / 2))
examples.append(bits)
return DataSet(name='majority', examples=examples)
def Parity(k, n, name='parity'):
"""
Return a DataSet with n k-bit examples of the parity problem:
k random bits followed by a 1 if an odd number of bits are 1, else 0.
"""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for _ in range(k)]
bits.append(sum(bits) % 2)
examples.append(bits)
return DataSet(name=name, examples=examples)
def Xor(n):
"""Return a DataSet with n examples of 2-input xor."""
return Parity(2, n, name='xor')
def ContinuousXor(n):
"""2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."""
examples = []
for i in range(n):
x, y = [random.uniform(0.0, 2.0) for _ in '12']
examples.append([x, y, int(x) != int(y)])
return DataSet(name='continuous xor', examples=examples)
def compare(algorithms=None, datasets=None, k=10, trials=1):
"""
Compare various learners on various datasets using cross-validation.
Print results as a table.
"""
# default list of algorithms
algorithms = algorithms or [PluralityLearner, NaiveBayesLearner, NearestNeighborLearner, DecisionTreeLearner]
# default list of datasets
datasets = datasets or [iris, orings, zoo, restaurant, SyntheticRestaurant(20),
Majority(7, 100), Parity(7, 100), Xor(100)]
print_table([[a.__name__.replace('Learner', '')] + [cross_validation(a, d, k=k, trials=trials) for d in datasets]
for a in algorithms], header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
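# Hedged usage note (not executed here): a call such as
#   compare([PluralityLearner, DecisionTreeLearner], [iris, restaurant], k=10)
# is expected to print one row per learner and one column per dataset with the
# cross-validation scores, falling back to the defaults defined above otherwise.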
| RandomForest |
python | Pylons__pyramid | tests/test_util.py | {
"start": 6249,
"end": 10172
} | class ____(unittest.TestCase):
def _makeOne(self):
cls = self._getTargetClass()
class Foo(cls):
pass
return Foo()
def _getTargetClass(self):
from pyramid.util import InstancePropertyMixin
return InstancePropertyMixin
def test_callable(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker)
foo.bar = 1
self.assertEqual(1, foo.worker)
foo.bar = 2
self.assertEqual(2, foo.worker)
def test_callable_with_name(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, name='x')
foo.bar = 1
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
def test_callable_with_reify(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, reify=True)
foo.bar = 1
self.assertEqual(1, foo.worker)
foo.bar = 2
self.assertEqual(1, foo.worker)
def test_callable_with_name_reify(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, name='x')
foo.set_property(worker, name='y', reify=True)
foo.bar = 1
self.assertEqual(1, foo.y)
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
self.assertEqual(1, foo.y)
def test_property_without_name(self):
def worker(obj): # pragma: no cover
pass
foo = self._makeOne()
self.assertRaises(ValueError, foo.set_property, property(worker))
def test_property_with_name(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(property(worker), name='x')
foo.bar = 1
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
def test_property_with_reify(self):
def worker(obj): # pragma: no cover
pass
foo = self._makeOne()
self.assertRaises(
ValueError,
foo.set_property,
property(worker),
name='x',
reify=True,
)
def test_override_property(self):
def worker(obj):
pass
foo = self._makeOne()
foo.set_property(worker, name='x')
self.assertIsNone(foo.x)
foo.x = 1
self.assertEqual(foo.x, 1)
del foo.x
self.assertIsNone(foo.x)
def test_override_reify(self):
def worker(obj):
pass
foo = self._makeOne()
foo.set_property(worker, name='x', reify=True)
self.assertIsNone(foo.x)
foo.x = 1
self.assertEqual(1, foo.x)
foo.x = 2
self.assertEqual(2, foo.x)
def test_reset_property(self):
foo = self._makeOne()
foo.set_property(lambda _: 1, name='x')
self.assertEqual(1, foo.x)
foo.set_property(lambda _: 2, name='x')
self.assertEqual(2, foo.x)
def test_reset_reify(self):
"""This is questionable behavior, but may as well get notified
if it changes."""
foo = self._makeOne()
foo.set_property(lambda _: 1, name='x', reify=True)
self.assertEqual(1, foo.x)
foo.set_property(lambda _: 2, name='x', reify=True)
self.assertEqual(1, foo.x)
def test_new_class_keeps_parent_module_name(self):
foo = self._makeOne()
self.assertEqual(foo.__module__, 'tests.test_util')
self.assertEqual(foo.__class__.__module__, 'tests.test_util')
foo.set_property(lambda _: 1, name='x', reify=True)
self.assertEqual(foo.__module__, 'tests.test_util')
self.assertEqual(foo.__class__.__module__, 'tests.test_util')
| Test_InstancePropertyMixin |
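A short usage sketch of the behavior these tests exercise; the `Request` subclass and attribute names are made up for illustration.

from pyramid.util import InstancePropertyMixin

class Request(InstancePropertyMixin):
    pass

req = Request()
req.set_property(lambda obj: obj.bar * 2, name='double')
req.set_property(lambda obj: obj.bar * 2, name='double_once', reify=True)
req.bar = 3
assert req.double == 6 and req.double_once == 6
req.bar = 5
assert req.double == 10       # recomputed on every access
assert req.double_once == 6   # cached after the first access (reify=True)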
python | pydata__xarray | xarray/core/accessor_dt.py | {
"start": 22225,
"end": 23421
} | class ____(
DatetimeAccessor[T_DataArray], TimedeltaAccessor[T_DataArray]
):
def __new__(cls, obj: T_DataArray) -> Self:
# CombinedDatetimelikeAccessor isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
if not _contains_datetime_like_objects(obj.variable):
# We use an AttributeError here so that `obj.dt` raises an error that
# `getattr` expects; https://github.com/pydata/xarray/issues/8718. It's a
# bit unusual in a `__new__`, but that's the only case where we use this
# class.
raise AttributeError(
"'.dt' accessor only available for "
"DataArray with datetime64 timedelta64 dtype or "
"for arrays containing cftime datetime "
"objects."
)
if is_np_timedelta_like(obj.dtype):
return TimedeltaAccessor(obj) # type: ignore[return-value]
else:
return DatetimeAccessor(obj) # type: ignore[return-value]
| CombinedDatetimelikeAccessor |
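A small usage sketch of the dispatch implemented above; the example arrays are made up and assume recent pandas/xarray.

import pandas as pd
import xarray as xr

times = xr.DataArray(pd.date_range("2000-01-01", periods=3, freq="D"), dims="time")
print(times.dt.dayofyear.values)   # [1 2 3] -> handled by DatetimeAccessor

deltas = xr.DataArray(pd.timedelta_range(start="1 day", periods=3, freq="D"), dims="time")
print(deltas.dt.days.values)       # [1 2 3] -> handled by TimedeltaAccessor

# xr.DataArray([1, 2, 3]).dt would raise AttributeError: not datetime-like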
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 999905,
"end": 1000625
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for User."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("TeamMemberEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("User"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| TeamMemberConnection |
python | getsentry__sentry | src/sentry/utils/query.py | {
"start": 7839,
"end": 8293
} | class ____[V](RangeQuerySetWrapper[V]):
def get_total_count(self) -> int:
return self.queryset.count()
def __iter__(self) -> Iterator[V]:
total_count = self.get_total_count()
iterator = super().__iter__()
verbose_name = self.queryset.model._meta.verbose_name_plural or self.queryset.model.__name__
return iter(WithProgressBar(iterator, total_count, verbose_name.title()))
| RangeQuerySetWrapperWithProgressBar |
python | joke2k__faker | faker/providers/file/__init__.py | {
"start": 156,
"end": 14898
} | class ____(BaseProvider):
"""Implement default file provider for Faker."""
application_mime_types: ElementsType[str] = (
"application/atom+xml", # Atom feeds
"application/ecmascript",
# ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to
# application/javascript but with stricter processing rules)
"application/EDI-X12", # EDI X12 data; Defined in RFC 1767
"application/EDIFACT", # EDI EDIFACT data; Defined in RFC 1767
"application/json", # JavaScript Object Notation JSON; Defined in RFC 4627
# ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to
# application/ecmascript
"application/javascript",
# but with looser processing rules) It is not accepted in IE 8
# or earlier - text/javascript is accepted but it is defined as obsolete in RFC 4329.
# The "type" attribute of the <script> tag in HTML5 is optional and in practice
# omitting the media type of JavaScript programs is the most interoperable
# solution since all browsers have always assumed the correct
# default even before HTML5.
"application/octet-stream",
# Arbitrary binary data.[6] Generally speaking this type identifies files that are not associated with
# a specific application. Contrary to past assumptions by software packages such as Apache this is not
# a type that should be applied to unknown files. In such a case, a server or application should not indicate
# a content type, as it may be incorrect, but rather, should omit the type in order to allow the recipient
# to guess the type.[7]
"application/ogg", # Ogg, a multimedia bitstream container format; Defined in RFC 5334
"application/pdf", # Portable Document Format, PDF has been in use for document exchange
# on the Internet since 1993; Defined in RFC 3778
"application/postscript", # PostScript; Defined in RFC 2046
"application/rdf+xml", # Resource Description Framework; Defined by RFC 3870
"application/rss+xml", # RSS feeds
"application/soap+xml", # SOAP; Defined by RFC 3902
# Web Open Font Format; (candidate recommendation; use application/x-font-woff
"application/font-woff",
# until standard is official)
"application/xhtml+xml", # XHTML; Defined by RFC 3236
"application/xml-dtd", # DTD files; Defined by RFC 3023
"application/xop+xml", # XOP
"application/zip", # ZIP archive files; Registered[8]
"application/gzip", # Gzip, Defined in RFC 6713
)
audio_mime_types: ElementsType[str] = (
"audio/basic", # mulaw audio at 8 kHz, 1 channel; Defined in RFC 2046
"audio/L24", # 24bit Linear PCM audio at 8-48 kHz, 1-N channels; Defined in RFC 3190
"audio/mp4", # MP4 audio
"audio/mpeg", # MP3 or other MPEG audio; Defined in RFC 3003
"audio/ogg", # Ogg Vorbis, Speex, Flac and other audio; Defined in RFC 5334
"audio/vorbis", # Vorbis encoded audio; Defined in RFC 5215
# RealAudio; Documented in RealPlayer Help[9]
"audio/vnd.rn-realaudio",
"audio/vnd.wave", # WAV audio; Defined in RFC 2361
"audio/webm", # WebM open media format
)
image_mime_types: ElementsType[str] = (
"image/gif", # GIF image; Defined in RFC 2045 and RFC 2046
"image/jpeg", # JPEG JFIF image; Defined in RFC 2045 and RFC 2046
"image/pjpeg",
# JPEG JFIF image; Associated with Internet Explorer; Listed in ms775147(v=vs.85) - Progressive JPEG,
# initiated before global browser support for progressive JPEGs (Microsoft and Firefox).
# Portable Network Graphics; Registered,[10] Defined in RFC 2083
"image/png",
"image/svg+xml", # SVG vector image; Defined in SVG Tiny 1.2 Specification Appendix M
# Tag Image File Format (only for Baseline TIFF); Defined in RFC 3302
"image/tiff",
"image/vnd.microsoft.icon", # ICO image; Registered[11]
)
message_mime_types: ElementsType[str] = (
"message/http", # Defined in RFC 2616
"message/imdn+xml", # IMDN Instant Message Disposition Notification; Defined in RFC 5438
"message/partial", # Email; Defined in RFC 2045 and RFC 2046
# Email; EML files, MIME files, MHT files, MHTML files; Defined in RFC
# 2045 and RFC 2046
"message/rfc822",
)
model_mime_types: ElementsType[str] = (
"model/example", # Defined in RFC 4735
"model/iges", # IGS files, IGES files; Defined in RFC 2077
"model/mesh", # MSH files, MESH files; Defined in RFC 2077, SILO files
"model/vrml", # WRL files, VRML files; Defined in RFC 2077
# X3D ISO standard for representing 3D computer graphics, X3DB binary
# files
"model/x3d+binary",
"model/x3d+vrml", # X3D ISO standard for representing 3D computer graphics, X3DV VRML files
"model/x3d+xml", # X3D ISO standard for representing 3D computer graphics, X3D XML files
)
multipart_mime_types: ElementsType[str] = (
"multipart/mixed", # MIME Email; Defined in RFC 2045 and RFC 2046
"multipart/alternative", # MIME Email; Defined in RFC 2045 and RFC 2046
# MIME Email; Defined in RFC 2387 and used by MHTML (HTML mail)
"multipart/related",
"multipart/form-data", # MIME Webform; Defined in RFC 2388
"multipart/signed", # Defined in RFC 1847
"multipart/encrypted", # Defined in RFC 1847
)
text_mime_types: ElementsType[str] = (
"text/cmd", # commands; subtype resident in Gecko browsers like Firefox 3.5
"text/css", # Cascading Style Sheets; Defined in RFC 2318
"text/csv", # Comma-separated values; Defined in RFC 4180
"text/html", # HTML; Defined in RFC 2854
"text/javascript",
# (Obsolete): JavaScript; Defined in and obsoleted by RFC 4329 in order to discourage its usage in favor of
# application/javascript. However, text/javascript is allowed in HTML 4 and 5 and, unlike
# application/javascript, has cross-browser support. The "type" attribute of the <script> tag in HTML5 is
# optional and there is no need to use it at all since all browsers have always assumed the correct default
# (even in HTML 4 where it was required by the specification).
"text/plain", # Textual data; Defined in RFC 2046 and RFC 3676
"text/vcard", # vCard (contact information); Defined in RFC 6350
"text/xml", # Extensible Markup Language; Defined in RFC 3023
)
video_mime_types: ElementsType[str] = (
"video/mpeg", # MPEG-1 video with multiplexed audio; Defined in RFC 2045 and RFC 2046
"video/mp4", # MP4 video; Defined in RFC 4337
# Ogg Theora or other video (with audio); Defined in RFC 5334
"video/ogg",
"video/quicktime", # QuickTime video; Registered[12]
"video/webm", # WebM Matroska-based open media format
"video/x-matroska", # Matroska open media format
"video/x-ms-wmv", # Windows Media Video; Documented in Microsoft KB 288102
"video/x-flv", # Flash video (FLV files)
)
mime_types: Dict[str, ElementsType[str]] = OrderedDict(
(
("application", application_mime_types),
("audio", audio_mime_types),
("image", image_mime_types),
("message", message_mime_types),
("model", model_mime_types),
("multipart", multipart_mime_types),
("text", text_mime_types),
("video", video_mime_types),
)
)
audio_file_extensions: ElementsType[str] = (
"flac",
"mp3",
"wav",
)
image_file_extensions: ElementsType[str] = (
"bmp",
"gif",
"jpeg",
"jpg",
"png",
"tiff",
)
text_file_extensions: ElementsType[str] = (
"css",
"csv",
"html",
"js",
"json",
"txt",
)
video_file_extensions: ElementsType[str] = (
"mp4",
"avi",
"mov",
"webm",
)
office_file_extensions: ElementsType[str] = (
"doc", # legacy MS Word
"docx", # MS Word
"xls", # legacy MS Excel
"xlsx", # MS Excel
"ppt", # legacy MS PowerPoint
"pptx", # MS PowerPoint
"odt", # LibreOffice document
"ods", # LibreOffice spreadsheet
"odp", # LibreOffice presentation
"pages", # Apple Pages
"numbers", # Apple Numbers
"key", # Apple Keynote
"pdf", # Portable Document Format
)
file_extensions: Dict[str, ElementsType[str]] = OrderedDict(
(
("audio", audio_file_extensions),
("image", image_file_extensions),
("office", office_file_extensions),
("text", text_file_extensions),
("video", video_file_extensions),
)
)
file_systems_path_rules: Dict[str, Dict] = {
"windows": {
"root": "C:\\",
"separator": "\\",
},
"linux": {
"root": "/",
"separator": "/",
},
}
unix_device_prefixes: ElementsType[str] = ("sd", "vd", "xvd")
def mime_type(self, category: Optional[str] = None) -> str:
"""Generate a mime type under the specified ``category``.
If ``category`` is ``None``, a random category will be used. The list of
valid categories include ``'application'``, ``'audio'``, ``'image'``,
``'message'``, ``'model'``, ``'multipart'``, ``'text'``, and
``'video'``.
:sample:
:sample: category='application'
"""
category = category if category else self.random_element(list(self.mime_types.keys()))
return self.random_element(self.mime_types[category])
def file_name(self, category: Optional[str] = None, extension: Optional[str] = None) -> str:
"""Generate a random file name with extension.
If ``extension`` is ``None``, a random extension will be created
under the hood using |file_extension| with the specified
``category``. If a value for ``extension`` is provided, the
value will be used instead, and ``category`` will be ignored.
The actual name part itself is generated using |word|. If
extension is an empty string then no extension will be added,
and file_name will be the same as |word|.
:sample: size=10
:sample: category='audio'
:sample: extension='abcdef'
:sample: category='audio', extension='abcdef'
:sample: extension=''
"""
if extension is None:
extension = self.file_extension(category)
filename: str = self.generator.word()
return f"{filename}.{extension}" if extension else filename
def file_extension(self, category: Optional[str] = None) -> str:
"""Generate a file extension under the specified ``category``.
If ``category`` is ``None``, a random category will be used. The list of
valid categories include: ``'audio'``, ``'image'``, ``'office'``,
``'text'``, and ``'video'``.
:sample:
:sample: category='image'
"""
if category is None:
category = self.random_element(list(self.file_extensions.keys()))
return self.random_element(self.file_extensions[category])
def file_path(
self,
depth: int = 1,
category: Optional[str] = None,
extension: Optional[Union[str, Sequence[str]]] = None,
absolute: Optional[bool] = True,
file_system_rule: Literal["linux", "windows"] = "linux",
) -> str:
"""Generate an pathname to a file.
This method uses |file_name| under the hood to generate the file
name itself, and ``depth`` controls the depth of the directory
path, and |word| is used under the hood to generate the
different directory names.
If ``absolute`` is ``True`` (default), the generated path starts
with ``/`` and is absolute. Otherwise, the generated path is
relative.
If used, ``extension`` can be either a string, forcing that
extension, a sequence of strings (one will be picked at random),
or an empty sequence (the path will have no extension). Default
behaviour is the same as |file_name|
If ``file_system_rule`` is set (default ``"linux"``), the generated path follows
the specified file system's path conventions. The list of valid file systems
includes ``'windows'`` and ``'linux'``.
:sample: size=10
:sample: depth=3
:sample: depth=5, category='video'
:sample: depth=5, category='video', extension='abcdef'
:sample: extension=[]
:sample: extension=''
:sample: extension=["a", "bc", "def"]
:sample: depth=5, category='video', extension='abcdef', file_system_rule='windows'
"""
if extension is not None and not isinstance(extension, str):
if len(extension):
extension = self.random_element(extension)
else:
extension = ""
fs_rule = self.file_systems_path_rules.get(file_system_rule, None)
if not fs_rule:
raise TypeError("Specified file system is invalid.")
root = fs_rule["root"]
seperator = fs_rule["separator"]
path: str = self.file_name(category, extension)
for _ in range(0, depth):
path = f"{self.generator.word()}{seperator}{path}"
return root + path if absolute else path
def unix_device(self, prefix: Optional[str] = None) -> str:
"""Generate a Unix device file name.
If ``prefix`` is ``None``, a random prefix will be used. The list of
valid prefixes include: ``'sd'``, ``'vd'``, and ``'xvd'``.
:sample:
:sample: prefix='mmcblk'
"""
if prefix is None:
prefix = self.random_element(self.unix_device_prefixes)
suffix: str = self.random_element(string.ascii_lowercase)
path = f"/dev/{prefix}{suffix}"
return path
def unix_partition(self, prefix: Optional[str] = None) -> str:
"""Generate a Unix partition name.
This method uses |unix_device| under the hood to create a device file
name with the specified ``prefix``.
:sample:
:sample: prefix='mmcblk'
"""
path: str = self.unix_device(prefix=prefix)
path += str(self.random_digit())
return path
| Provider |
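A brief usage sketch of the provider above through the standard Faker entry point, assuming a Faker version that includes the file_system_rule parameter shown in this record; outputs are random, the comments only show plausible values.

from faker import Faker

fake = Faker()
print(fake.mime_type(category="image"))                     # e.g. 'image/png'
print(fake.file_name(category="audio"))                     # e.g. 'matter.mp3'
print(fake.file_path(depth=3, file_system_rule="windows"))  # e.g. 'C:\\a\\b\\c\\d.txt'
print(fake.unix_partition(prefix="sd"))                     # e.g. '/dev/sdb1'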
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_qtagg.py | {
"start": 3343,
"end": 3413
} | class ____(_BackendQT):
FigureCanvas = FigureCanvasQTAgg
| _BackendQTAgg |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 11021,
"end": 11720
} | class ____(Benchmark):
params = ([10, 100, 1000, 5000, 10000],
[2, 3])
param_names = ['num_points', 'ndim']
def setup(self, num_points, ndim):
if ndim == 2:
center = np.zeros(2)
self.points = generate_circle_points(num_points)
else:
center = np.zeros(3)
self.points = generate_spherical_points(num_points)
self.sv = SphericalVoronoi(self.points, radius=1,
center=center)
def time_spherical_polygon_area_calculation(self, num_points, ndim):
"""Time the area calculation in the Spherical Voronoi code."""
self.sv.calculate_areas()
| SphericalVorAreas |
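Outside the benchmark harness, the area calculation being timed can be exercised directly; the tetrahedron points below are invented for illustration.

import numpy as np
from scipy.spatial import SphericalVoronoi

# vertices of a regular tetrahedron on the unit sphere
points = np.array([[0.0, 0.0, 1.0],
                   [0.0, 2.0 * np.sqrt(2.0) / 3.0, -1.0 / 3.0],
                   [np.sqrt(6.0) / 3.0, -np.sqrt(2.0) / 3.0, -1.0 / 3.0],
                   [-np.sqrt(6.0) / 3.0, -np.sqrt(2.0) / 3.0, -1.0 / 3.0]])
sv = SphericalVoronoi(points, radius=1, center=np.zeros(3))
areas = sv.calculate_areas()
print(areas.sum())   # ~ 4*pi, the full surface area of the unit sphere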
python | tensorflow__tensorflow | tensorflow/python/training/session_manager.py | {
"start": 23266,
"end": 23640
} | class ____:
"""A timer that tracks a duration since creation."""
__slots__ = ["_start_time_secs", "_duration_secs"]
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
| _CountDownTimer |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold.py | {
"start": 916,
"end": 1086
} | class ____(TypedDict, total=False):
threshold_type: int
trigger_type: int
value: int
window_in_seconds: int
environment: object
| ReleaseThresholdPOSTData |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 2788,
"end": 3307
} | class ____(Enum):
"""indicates the :term:`DBAPI` cursor method that will be used to invoke
a statement."""
EXECUTE = 0
"""indicates cursor.execute() will be used"""
EXECUTEMANY = 1
"""indicates cursor.executemany() will be used."""
INSERTMANYVALUES = 2
"""indicates cursor.execute() will be used with an INSERT where the
VALUES expression will be expanded to accommodate for multiple
parameter sets
.. seealso::
:ref:`engine_insertmanyvalues`
"""
| ExecuteStyle |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-vertexai/llama_index/indices/managed/vertexai/base.py | {
"start": 1215,
"end": 6842
} | class ____(BaseManagedIndex):
"""
Vertex AI Index.
The Vertex AI RAG index implements a managed index that uses Vertex AI as the backend.
Vertex AI performs a lot of the functions in traditional indexes in the backend:
- breaks down a document into chunks (nodes)
- Creates the embedding for each chunk (node)
- Performs the search for the top k most similar nodes to a query
- Optionally can perform summarization of the top k nodes
Args:
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
"""
def __init__(
self,
project_id: str,
location: Optional[str] = None,
corpus_id: Optional[str] = None,
corpus_display_name: Optional[str] = None,
corpus_description: Optional[str] = None,
show_progress: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the Vertex AI API."""
if corpus_id and (corpus_display_name or corpus_description):
raise ValueError(
"Cannot specify both corpus_id and corpus_display_name or corpus_description"
)
self.project_id = project_id
self.location = location
self.show_progress = show_progress
self._user_agent = get_user_agent("vertexai-rag")
vertexai.init(project=self.project_id, location=self.location)
with telemetry.tool_context_manager(self._user_agent):
# If a corpus is not specified, create a new one.
if corpus_id:
# Make sure corpus exists
self.corpus_name = rag.get_corpus(name=corpus_id).name
else:
self.corpus_name = rag.create_corpus(
display_name=corpus_display_name, description=corpus_description
).name
def import_files(
self,
uris: Sequence[str],
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> ImportRagFilesResponse:
"""Import Google Cloud Storage or Google Drive files into the index."""
# Convert https://storage.googleapis.com URLs to gs:// format
uris = [
re.sub(r"^https://storage\.googleapis\.com/", "gs://", uri) for uri in uris
]
with telemetry.tool_context_manager(self._user_agent):
return rag.import_files(
self.corpus_name,
paths=uris,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
timeout=timeout,
**kwargs,
)
def insert_file(
self,
file_path: str,
metadata: Optional[dict] = None,
**insert_kwargs: Any,
) -> Optional[str]:
"""Insert a local file into the index."""
if metadata:
display_name = metadata.get("display_name")
description = metadata.get("description")
with telemetry.tool_context_manager(self._user_agent):
rag_file = rag.upload_file(
corpus_name=self.corpus_name,
path=file_path,
display_name=display_name,
description=description,
**insert_kwargs,
)
return rag_file.name if rag_file else None
def list_files(self) -> Sequence[str]:
"""List all files in the index."""
files = []
with telemetry.tool_context_manager(self._user_agent):
for file in rag.list_files(corpus_name=self.corpus_name):
files.append(file.name)
return files
def delete_file(self, file_name: str) -> None:
"""Delete file from the index."""
with telemetry.tool_context_manager(self._user_agent):
rag.delete_file(name=file_name, corpus_name=self.corpus_name)
def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine:
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
kwargs["retriever"] = self.as_retriever(**kwargs)
return RetrieverQueryEngine.from_args(**kwargs)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
"""Return a Retriever for this managed index."""
from llama_index.indices.managed.vertexai.retriever import (
VertexAIRetriever,
)
similarity_top_k = kwargs.pop("similarity_top_k", None)
vector_distance_threshold = kwargs.pop("vector_distance_threshold", None)
return VertexAIRetriever(
self.corpus_name,
similarity_top_k,
vector_distance_threshold,
self._user_agent,
**kwargs,
)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a set of documents (each a node)."""
raise NotImplementedError("Node insertion is not supported.")
def delete_ref_doc(
self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
) -> None:
"""Delete a document and it's nodes by using ref_doc_id."""
if delete_from_docstore:
with telemetry.tool_context_manager(self._user_agent):
rag.delete_file(
name=ref_doc_id,
corpus_name=self.corpus_name,
)
def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None:
"""Update a document and it's corresponding nodes."""
raise NotImplementedError("Document update is not supported.")
| VertexAIIndex |
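A hedged end-to-end sketch, assuming the package exports the class at llama_index.indices.managed.vertexai; the project, corpus name, GCS URI, and query are placeholders, and Google Cloud credentials plus an LLM configured for LlamaIndex are required at run time.

from llama_index.indices.managed.vertexai import VertexAIIndex

index = VertexAIIndex(
    project_id="my-gcp-project",          # placeholder
    location="us-central1",               # placeholder
    corpus_display_name="docs-corpus",
    corpus_description="Demo corpus",
)
index.import_files(["gs://my-bucket/handbook.pdf"])   # placeholder URI
retriever = index.as_retriever(similarity_top_k=5)
query_engine = index.as_query_engine()
print(query_engine.query("What is the vacation policy?"))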
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/encoders.py | {
"start": 5120,
"end": 6384
} | class ____(nn.Module):
"""
CNN architecture used by King in their Candy Crush predictor
https://www.researchgate.net/publication/328307928_Human-Like_Playtesting_with_Deep_Learning
"""
def __init__(
self, height: int, width: int, initial_channels: int, output_size: int
):
super().__init__()
self.h_size = output_size
conv_1_hw = conv_output_shape((height, width), 3, 1)
conv_2_hw = conv_output_shape(conv_1_hw, 3, 1)
self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 144
self.conv_layers = nn.Sequential(
nn.Conv2d(initial_channels, 35, [3, 3], [1, 1]),
nn.LeakyReLU(),
nn.Conv2d(35, 144, [3, 3], [1, 1]),
nn.LeakyReLU(),
)
self.dense = nn.Sequential(
linear_layer(
self.final_flat,
self.h_size,
kernel_init=Initialization.KaimingHeNormal,
kernel_gain=1.41, # Use ReLU gain
),
nn.LeakyReLU(),
)
def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:
hidden = self.conv_layers(visual_obs)
hidden = hidden.reshape(-1, self.final_flat)
return self.dense(hidden)
| SmallVisualEncoder |
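A small shape-check sketch for the encoder above, assuming ml-agents is installed, an 84x84 3-channel observation in NCHW layout, and a 256-unit output.

import torch
from mlagents.trainers.torch_entities.encoders import SmallVisualEncoder

encoder = SmallVisualEncoder(height=84, width=84, initial_channels=3, output_size=256)
batch = torch.zeros(8, 3, 84, 84)   # (N, C, H, W); values do not matter here
features = encoder(batch)
print(features.shape)               # torch.Size([8, 256])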
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_precord.py | {
"start": 4448,
"end": 7084
} | class ____(PMap._Evolver):
__slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra')
def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False):
super(_PRecordEvolver, self).__init__(original_pmap)
self._destination_cls = cls
self._invariant_error_codes = []
self._missing_fields = []
self._factory_fields = _factory_fields
self._ignore_extra = _ignore_extra
def __setitem__(self, key, original_value):
self.set(key, original_value)
def set(self, key, original_value):
field = self._destination_cls._precord_fields.get(key)
if field:
if self._factory_fields is None or field in self._factory_fields:
try:
if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra):
value = field.factory(original_value, ignore_extra=self._ignore_extra)
else:
value = field.factory(original_value)
except InvariantException as e:
self._invariant_error_codes += e.invariant_errors
self._missing_fields += e.missing_fields
return self
else:
value = original_value
check_type(self._destination_cls, field, key, value)
is_ok, error_code = field.invariant(value)
if not is_ok:
self._invariant_error_codes.append(error_code)
return super(_PRecordEvolver, self).set(key, value)
else:
raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))
def persistent(self):
cls = self._destination_cls
is_dirty = self.is_dirty()
pm = super(_PRecordEvolver, self).persistent()
if is_dirty or not isinstance(pm, cls):
result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
else:
result = pm
if cls._precord_mandatory_fields:
self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
in (cls._precord_mandatory_fields - set(result.keys())))
if self._invariant_error_codes or self._missing_fields:
raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
'Field invariant failed')
check_global_invariants(result, cls._precord_invariants)
return result
| _PRecordEvolver |
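The evolver above backs the public PRecord evolution API; a short sketch of the behavior it enforces, using the upstream pyrsistent package and a made-up record type.

from pyrsistent import PRecord, field

class Point(PRecord):
    x = field(type=int, mandatory=True)
    y = field(type=int, mandatory=True)

p = Point(x=1, y=2)
e = p.evolver()          # a _PRecordEvolver under the hood
e['x'] = 10              # type- and invariant-checked via set()
p2 = e.persistent()      # mandatory fields re-verified here
assert (p2.x, p2.y) == (10, 2) and p.x == 1   # original record is unchanged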
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 306181,
"end": 307318
} | class ____(Response):
"""
Response of tasks.edit_configuration endpoint.
:param updated: Indicates if the task was updated successfully
:type updated: int
"""
_service = "tasks"
_action = "edit_configuration"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated=None, **kwargs):
super(EditConfigurationResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self):
return self._property_updated
@updated.setter
def updated(self, value):
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| EditConfigurationResponse |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_test.py | {
"start": 37303,
"end": 54089
} | class ____(test.TestCase):
def test_keys_empty(self):
with self.assertRaisesRegex(ValueError,
'keys must be a list with length > 1'):
fc._crossed_column([], 10)
def test_keys_length_one(self):
with self.assertRaisesRegex(ValueError,
'keys must be a list with length > 1'):
fc._crossed_column(['a'], 10)
def test_key_type_unsupported(self):
with self.assertRaisesRegex(ValueError, 'Unsupported key type'):
fc._crossed_column(['a', fc._numeric_column('c')], 10)
with self.assertRaisesRegex(
ValueError, 'categorical_column_with_hash_bucket is not supported'):
fc._crossed_column(
['a', fc._categorical_column_with_hash_bucket('c', 10)], 10)
def test_hash_bucket_size_negative(self):
with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
fc._crossed_column(['a', 'c'], -1)
def test_hash_bucket_size_zero(self):
with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
fc._crossed_column(['a', 'c'], 0)
def test_hash_bucket_size_none(self):
with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
fc._crossed_column(['a', 'c'], None)
def test_name(self):
a = fc._numeric_column('a', dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed1 = fc._crossed_column(['d1', 'd2'], 10)
crossed2 = fc._crossed_column([b, 'c', crossed1], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc._numeric_column('a', dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed1 = fc._crossed_column(['d1', 'd2'], 10)
crossed2 = fc._crossed_column([crossed1, 'c', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_leaf_keys_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc._numeric_column('a', dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed1 = fc._crossed_column(['d2', 'c'], 10)
crossed2 = fc._crossed_column([crossed1, 'd1', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_var_scope_name(self):
a = fc._numeric_column('a', dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed1 = fc._crossed_column(['d1', 'd2'], 10)
crossed2 = fc._crossed_column([b, 'c', crossed1], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2._var_scope_name)
def test_parse_spec(self):
a = fc._numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed = fc._crossed_column([b, 'c'], 10)
self.assertEqual({
'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
'c': parsing_ops.VarLenFeature(dtypes.string),
}, crossed._parse_example_spec)
def test_num_buckets(self):
a = fc._numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed = fc._crossed_column([b, 'c'], 15)
self.assertEqual(15, crossed._num_buckets)
def test_deep_copy(self):
a = fc._numeric_column('a', dtype=dtypes.int32)
b = fc._bucketized_column(a, boundaries=[0, 1])
crossed1 = fc._crossed_column(['d1', 'd2'], 10)
crossed2 = fc._crossed_column([b, 'c', crossed1], 15, hash_key=5)
crossed2_copy = copy.deepcopy(crossed2)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name,)
self.assertEqual(15, crossed2_copy.hash_bucket_size)
self.assertEqual(5, crossed2_copy.hash_key)
def test_parse_example(self):
price = fc._numeric_column('price', shape=[2])
bucketized_price = fc._bucketized_column(price, boundaries=[0, 50])
price_cross_wire = fc._crossed_column([bucketized_price, 'wire'], 10)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.])),
'wire':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([price_cross_wire]))
self.assertIn('price', features)
self.assertIn('wire', features)
with self.cached_session():
self.assertAllEqual([[20., 110.]], features['price'])
wire_sparse = features['wire']
self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices)
# Use byte constants to pass the open-source test.
self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values)
self.assertAllEqual([1, 2], wire_sparse.dense_shape)
def test_transform_feature(self):
price = fc._numeric_column('price', shape=[2])
bucketized_price = fc._bucketized_column(price, boundaries=[0, 50])
hash_bucket_size = 10
price_cross_wire = fc._crossed_column([bucketized_price, 'wire'],
hash_bucket_size)
features = {
'price': constant_op.constant([[1., 2.], [5., 6.]]),
'wire': sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2]),
}
outputs = _transform_features(features, [price_cross_wire])
output = outputs[price_cross_wire]
output_val = self.evaluate(output)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]],
output_val.indices)
for val in output_val.values:
self.assertIn(val, list(range(hash_bucket_size)))
self.assertAllEqual([2, 4], output_val.dense_shape)
def test_get_sparse_tensors(self):
a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc._bucketized_column(a, boundaries=(0, 1))
crossed1 = fc._crossed_column(['d1', 'd2'], 10)
crossed2 = fc._crossed_column([b, 'c', crossed1], 15, hash_key=5)
with ops.Graph().as_default():
builder = _LazyBuilder({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
'd1':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d1A', 'd1B', 'd1C'],
dense_shape=(2, 2)),
'd2':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d2A', 'd2B', 'd2C'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed2._get_sparse_tensors(builder)
with _initialized_session():
id_tensor_eval = id_weight_pair.id_tensor.eval()
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
(1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
(1, 14), (1, 15)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (
6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
def test_get_sparse_tensors_simple(self):
"""Same as test_get_sparse_tensors, but with simpler values."""
a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc._bucketized_column(a, boundaries=(0, 1))
crossed = fc._crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
builder = _LazyBuilder({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed._get_sparse_tensors(builder)
with _initialized_session():
id_tensor_eval = id_weight_pair.id_tensor.eval()
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (1, 0, 1, 3, 4, 2)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
def test_linear_model(self):
"""Tests linear_model.
Uses data from test_get_sparse_tensors_simple.
"""
a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc._bucketized_column(a, boundaries=(0, 1))
crossed = fc._crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = fc.linear_model({
'a': constant_op.constant(((-1., .5), (.5, 1.))),
'c': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
self.evaluate(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
self.evaluate(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_linear_model_with_weights(self):
class _TestColumnWithWeights(_CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def name(self):
return 'test_column'
@property
def _parse_example_spec(self):
return {
self.name: parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name): parsing_ops.VarLenFeature(
dtypes.float32),
}
@property
def _num_buckets(self):
return 5
def _transform_feature(self, inputs):
return (inputs.get(self.name),
inputs.get('{}_weights'.format(self.name)))
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = inputs.get(self)
return _CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc._crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
fc.linear_model({
t.name: sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name): sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
def test_keras_linear_model(self):
"""Tests _LinearModel.
Uses data from test_get_sparse_tensors_simple.
"""
a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc._bucketized_column(a, boundaries=(0, 1))
crossed = fc._crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
self.evaluate(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
self.evaluate(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_keras_linear_model_with_weights(self):
class _TestColumnWithWeights(_CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def name(self):
return 'test_column'
@property
def _parse_example_spec(self):
return {
self.name:
parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name):
parsing_ops.VarLenFeature(dtypes.float32),
}
@property
def _num_buckets(self):
return 5
def _transform_feature(self, inputs):
return (inputs.get(self.name),
inputs.get('{}_weights'.format(self.name)))
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = inputs.get(self)
return _CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc._crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
get_keras_linear_model_predictions({
t.name:
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name):
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
def get_linear_model_bias(name='linear_model'):
with variable_scope.variable_scope(name, reuse=True):
return variable_scope.get_variable('bias_weights')
def get_linear_model_column_var(column, name='linear_model'):
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
name + '/' + column.name)[0]
def get_keras_linear_model_predictions(features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
keras_linear_model = _LinearModel(
feature_columns,
units,
sparse_combiner,
weight_collections,
trainable,
name='linear_model')
retval = keras_linear_model(features) # pylint: disable=not-callable
if cols_to_vars is not None:
cols_to_vars.update(keras_linear_model.cols_to_vars())
return retval
| CrossedColumnTest |
python | google__jax | jax/experimental/mosaic/gpu/fragmented_array.py | {
"start": 10662,
"end": 23298
} | class ____:
"""A FragmentedArray layout derived from a tiling expression.
A logical array is transformed according to the tiling expression, and then
split across warps (within a warpgroup), lanes, and vectorized according to
the dimension indices. All dimension indices must be negative and should refer
to the dimensions after tiling is applied.
To better understand this layout, consider the example of WGMMA-related tiling
from https://docs.nvidia.com/cuda/parallel-thread-execution/#wgmma-64n16-d as
applied to a 128x128 array. The corresponding TiledLayout has a tiling of:
(64, 8)(16, 8)(8, 8)(1, 2)
and warp_dims=(-8,), lane_dims=(-4, -3), vector_dim=-1.
We begin by applying the tiling (note that it always applies to a suffix):
Tiled shape Remaining tiling actions
===========================================================================
128 128 (64, 8)(16, 8)(8, 8)(1, 2)
2 16 64 8 (16, 8)(8, 8)(1, 2)
2 16 4 1 16 8 (8, 8)(1, 2)
2 16 4 1 2 1 8 8 (1, 2)
2 16 4 1 2 1 8 4 1 2
The last expression is our final shape. At this stage, we're ready to partition
the dimensions: warp_dims=(-8,) means that the 8-th dimension from the
end is partitioned over 4 warps in a warpgroup (and so it must be of size 4).
lane_dims=(-4, -3) indicate that those two dimensions are partitioned over
the lanes within a warp (their product must be equal to 32, i.e. warp size).
Finally, vector_dim=-1 indicates that each (logical) register is a vector
containing 2 elements (there are no shape restrictions here).
Given the above, the shape of the (logical) register array used to represent
the array in each thread is: (2, 16, 1, 1, 2, 1, 1, 1, 1, 1). We have set all
the dimensions above to 1, since each thread is a member of a single warp,
a single lane, and the elements along the vectorized dimension are represented
by a single (logical) register.
"""
tiling: Tiling
warp_dims: tuple[int | Replicated, ...] # major-to-minor
lane_dims: tuple[int | Replicated, ...] # major-to-minor
vector_dim: int
# Whether to enforce that the layout is canonical. Users of `TiledLayout`
# should not set this to `False`, but it is helpful to be able to construct
# non-canonical layouts as an intermediate state when implementing layout
# transformations.
_check_canonical: dataclasses.InitVar[bool] = True
def __post_init__(self, _check_canonical: bool):
if not self.tiling.tiles:
raise ValueError("Tiling must have at least one tile")
min_shape = self.tiling.tiles[0]
min_tiled_shape = self.tiling.tile_shape(min_shape)
dims_set = {
*self.partitioned_warp_dims, *self.partitioned_lane_dims, self.vector_dim,
}
if len(dims_set) != len(self.partitioned_warp_dims) + len(self.partitioned_lane_dims) + 1:
raise ValueError("Duplicate partitioning dimensions")
for d in dims_set:
if d >= 0:
raise ValueError("All dimensions must be negative")
if d < -(len(min_tiled_shape) - len(min_shape)):
raise ValueError("Dimension out of range")
warp_dims_prod = math.prod(
d.times if isinstance(d, Replicated) else min_tiled_shape[d]
for d in self.warp_dims
)
if warp_dims_prod != WARPS_IN_WARPGROUP:
raise ValueError(
"The product of warp dims does not equal the number of warps in a"
" warpgroup"
)
lane_dims_prod = math.prod(
d.times if isinstance(d, Replicated) else min_tiled_shape[d]
for d in self.lane_dims
)
if lane_dims_prod != WARP_SIZE:
raise ValueError("The product of lane dims does not equal the warp size")
if _check_canonical:
canonical_layout = self.canonicalize()
if self != canonical_layout:
raise ValueError(f"{self} is not canonical.")
@functools.cached_property
def partitioned_warp_dims(self) -> tuple[int, ...]:
return tuple(
d for d in self.warp_dims if not isinstance(d, Replicated)
)
@functools.cached_property
def partitioned_lane_dims(self) -> tuple[int, ...]:
return tuple(
d for d in self.lane_dims if not isinstance(d, Replicated)
)
def thread_idxs(self, shape: tuple[int, ...]) -> Iterable[tuple[ir.Value, ...]]:
# We first find the linear index and then divide by the shape to
# get the index.
i32 = ir.IntegerType.get_signless(32)
index = ir.IndexType.get()
contig_strides = tuple(utils.get_contiguous_strides(shape))
tile_strides = self.tiling.tile_strides(contig_strides)
dyn_tile_strides = [c(s, i32) for s in tile_strides[-self.tiled_tiling_rank:]]
warp_offset = utils.dyn_dot(self.warp_indices(), dyn_tile_strides)
lane_offset = utils.dyn_dot(self.lane_indices(), dyn_tile_strides)
dyn_offset = arith.addi(warp_offset, lane_offset)
register_shape = self.registers_shape(shape)
for tile_idx in np.ndindex(register_shape):
tile_lin_idx = sum(i * s for i, s in zip(tile_idx, tile_strides))
dyn_lin_idx = arith.addi(dyn_offset, c(tile_lin_idx, i32))
idx = []
for stride in contig_strides:
idx.append(arith.index_castui(index, arith.divui(dyn_lin_idx, c(stride, i32))))
dyn_lin_idx = arith.remui(dyn_lin_idx, c(stride, i32))
yield tuple(idx)
@property
def base_tile_shape(self) -> tuple[int, ...]:
"""The shape of the first tile in the tiling expression.
This tile acts as the divisibility constraint for a suffix of arrays to
which this layout applies.
"""
return self.tiling.tiles[0]
@functools.cached_property
def tiled_tiling_shape(self) -> tuple[int, ...]:
"""The shape of the suffix of the array after tiling.
We only allow our repeated tiling actions to further subdivide the
dimensions created by previous tiling actions (except for the first one),
so the tiled shape always ends with this suffix, no matter what array shape
it's applied to.
"""
base_tile_shape = self.base_tile_shape
return self.tiling.tile_shape(base_tile_shape)[len(base_tile_shape):]
@functools.cached_property
def tiled_tiling_rank(self) -> int:
return len(self.tiled_tiling_shape)
@property
def vector_length(self) -> int:
return self.tiled_tiling_shape[self.vector_dim]
def registers_element_type(self, t: ir.Type) -> ir.Type:
return ir.VectorType.get((self.vector_length,), t)
def registers_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
"""Returns the shape of the register array needed to represent an array of the given logical shape."""
tiled_shape = list(self.tiling.tile_shape(shape))
for d in self.partitioned_warp_dims:
tiled_shape[d] = 1
for d in self.partitioned_lane_dims:
tiled_shape[d] = 1
tiled_shape[self.vector_dim] = 1
return tuple(tiled_shape)
def shape_from_registers_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
"""Returns the logical shape of an array given its register array shape.
Inverse to `registers_shape`.
"""
tiled_tiling = self.tiled_tiling_shape
shape = list(shape)
for d in self.partitioned_warp_dims:
shape[d] = tiled_tiling[d]
for d in self.partitioned_lane_dims:
shape[d] = tiled_tiling[d]
shape[self.vector_dim] = tiled_tiling[self.vector_dim]
return self.tiling.untile_shape(tuple(shape))
def _delinearize_index(
self, idx: ir.Value, dims: tuple[int | Replicated, ...]
) -> tuple[ir.Value, ...]:
i32 = ir.IntegerType.get_signless(32)
tiled_shape = self.tiled_tiling_shape
dims_shape = tuple(
d.times if isinstance(d, Replicated) else tiled_shape[d]
for d in dims
)
dims_strides = utils.get_contiguous_strides(dims_shape)
dims_indices = tuple(
arith.remui(arith.divui(idx, c(stride, i32)), c(size, i32))
for stride, size in zip(dims_strides, dims_shape)
)
full_indices = [arith.constant(i32, 0)] * len(tiled_shape)
for d, i in zip(dims, dims_indices):
if isinstance(d, Replicated):
continue
full_indices[d] = i
return tuple(full_indices)
def lane_indices(self) -> tuple[ir.Value, ...]:
i32 = ir.IntegerType.get_signless(32)
lane_idx = arith.remui(utils.thread_idx(), c(WARP_SIZE, i32))
return self._delinearize_index(lane_idx, self.lane_dims)
def warp_indices(self) -> tuple[ir.Value, ...]:
i32 = ir.IntegerType.get_signless(32)
warp_idx = arith.remui(
arith.divui(utils.thread_idx(), c(WARP_SIZE, i32)),
c(WARPS_IN_WARPGROUP, i32),
)
return self._delinearize_index(warp_idx, self.warp_dims)
def remove_dimension(self, dim: int) -> TiledLayout:
if dim < 0 or dim >= len(self.tiling.tiles[0]):
raise ValueError(f"Dimension {dim} is out of range for {self.tiling}")
new_tiling = self.tiling.remove_dimension(dim)
tiled_shape = self.tiled_tiling_shape
removed_dim = self.tiling.tile_dimension(dim)
dim_offsets = np.cumsum(removed_dim[::-1])[::-1].tolist()
if removed_dim[self.vector_dim]:
new_tiling = Tiling((*new_tiling.tiles, (1,)))
new_vector_dim = -1
dim_offsets = [o - 1 for o in dim_offsets] # We inserted an extra dim.
else:
new_vector_dim = self.vector_dim + dim_offsets[self.vector_dim]
def replace_tiled_dim(d: int | Replicated, size: int):
if isinstance(d, Replicated):
return d
elif removed_dim[d]:
return Replicated(size)
else:
return d + dim_offsets[d]
return TiledLayout(
new_tiling,
tuple(
d if isinstance(d, Replicated) else replace_tiled_dim(d, tiled_shape[d])
for d in self.warp_dims
),
tuple(
d if isinstance(d, Replicated) else replace_tiled_dim(d, tiled_shape[d])
for d in self.lane_dims
),
new_vector_dim,
_check_canonical=False,
).canonicalize()
def reduce(self, axes: Sequence[int]) -> TiledLayout:
reduced_layout = self
for a in sorted(axes, reverse=True):
reduced_layout = reduced_layout.remove_dimension(a)
return reduced_layout
def canonicalize(self) -> TiledLayout:
"""Returns a version of this layout where tiling is canonical."""
canonical_tiling = self.tiling.canonicalize()
s = self.base_tile_shape
tiled_tiling_shape = self.tiled_tiling_shape
canonical_tiled_tiling_shape = canonical_tiling.tile_shape(s)[len(s):]
offset = len(canonical_tiled_tiling_shape) - 1
rev_removed_dims = []
# Iterate starting from the end in order to eliminate leading dimensions,
# whenever possible. For instance, say we have
#
# shape=(4, 32, 1, 1, 1, 1, 1)
# warp_dims=(-7,),
# lane_dims=(-6,)
# vector_dim=-1
#
# and we want to canonicalize this to
#
# shape=(4, 32, 1)
# warp_dims=(-3,),
# lane_dims=(-2,)
# vector_dim=-1.
#
# After the loop below, we end up with
#
# rev_removed_dims=[False, True, True, True, True, False, False]
#
# which will yield offsets `4` for `warp_dims[0]`, `4` for `lane_dims[0]`,
# and `0` for `vector_dim`.
for d in reversed(tiled_tiling_shape):
if offset >= 0 and d == canonical_tiled_tiling_shape[offset]:
rev_removed_dims.append(False)
offset -= 1
else:
rev_removed_dims.append(True)
assert offset == -1
dim_offsets = np.cumsum(rev_removed_dims)[::-1].tolist()
def replace_tiled_dim(d: int | Replicated):
return d if isinstance(d, Replicated) else d + dim_offsets[d]
def is_nontrivial(d: int | Replicated):
return isinstance(d, Replicated) or tiled_tiling_shape[d] != 1
return TiledLayout(
canonical_tiling,
tuple(replace_tiled_dim(d) for d in self.warp_dims if is_nontrivial(d)),
tuple(replace_tiled_dim(d) for d in self.lane_dims if is_nontrivial(d)),
replace_tiled_dim(self.vector_dim),
_check_canonical=False,
)
def _tiled_wgmma_layout(shape: tuple[int, ...]):
"""Returns the tiled layout relevant for WGMMA operations.
The tiled layout is equivalent to one described here in PTX documentation:
https://docs.nvidia.com/cuda/parallel-thread-execution/#wgmma-64n16-d
"""
if len(shape) != 2:
raise ValueError(f"Shape {shape} is not 2D")
if shape[0] % 64 != 0 or shape[1] % 8 != 0:
raise ValueError(f"Shape {shape} is not a multiple of 64x8")
return WGMMA_LAYOUT
@dataclasses.dataclass(frozen=True)
| TiledLayout |
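A self-contained sketch of the tile-shape arithmetic walked through in the TiledLayout docstring; tile_shape_sketch is a simplified stand-in written from that worked example, not the real Tiling implementation.

def tile_shape_sketch(shape, tiles):
    # Repeatedly split the trailing dimensions by each tile and append the
    # tile itself, exactly as in the 128x128 example above.
    for tile in tiles:
        rank = len(tile)
        prefix, suffix = shape[:-rank], shape[-rank:]
        shape = prefix + tuple(s // t for s, t in zip(suffix, tile)) + tile
    return shape

print(tile_shape_sketch((128, 128), [(64, 8), (16, 8), (8, 8), (1, 2)]))
# -> (2, 16, 4, 1, 2, 1, 8, 4, 1, 2), matching the last row of the table above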
python | getsentry__sentry | src/sentry/integrations/github_enterprise/repository.py | {
"start": 510,
"end": 1890
} | class ____(GitHubRepositoryProvider):
name = "GitHub Enterprise"
repo_provider = IntegrationProviderSlug.GITHUB_ENTERPRISE.value
def _validate_repo(self, client, installation, repo):
try:
repo_data = client.get_repo(repo)
except Exception as e:
raise installation.raise_error(e)
try:
# make sure installation has access to this specific repo
client.get_commits(repo)
except ApiError:
raise IntegrationError(f"You must grant Sentry access to {repo}")
return repo_data
def build_repository_config(
self, organization: RpcOrganization, data: dict[str, Any]
) -> RepositoryConfig:
integration = integration_service.get_integration(
integration_id=data["integration_id"], provider=self.repo_provider
)
if integration is None:
raise IntegrationError("Could not find the requested GitHub Enterprise integration")
base_url = integration.metadata["domain_name"].split("/")[0]
return {
"name": data["identifier"],
"external_id": data["external_id"],
"url": "https://{}/{}".format(base_url, data["identifier"]),
"config": {"name": data["identifier"]},
"integration_id": data["integration_id"],
}
| GitHubEnterpriseRepositoryProvider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/path_registry.py | {
"start": 15773,
"end": 20766
} | class ____(PathRegistry):
__slots__ = (
"prop",
"parent",
"path",
"natural_path",
"has_entity",
"entity",
"mapper",
"_wildcard_path_loader_key",
"_default_path_loader_key",
"_loader_key",
"is_unnatural",
)
inherit_cache = True
is_property = True
prop: StrategizedProperty[Any]
mapper: Optional[Mapper[Any]]
entity: Optional[_InternalEntityType[Any]]
def __init__(
self, parent: _AbstractEntityRegistry, prop: StrategizedProperty[Any]
):
# restate this path in terms of the
# given StrategizedProperty's parent.
insp = cast("_InternalEntityType[Any]", parent[-1])
natural_parent: _AbstractEntityRegistry = parent
# inherit "is_unnatural" from the parent
self.is_unnatural = parent.parent.is_unnatural or bool(
parent.mapper.inherits
)
if not insp.is_aliased_class or insp._use_mapper_path: # type: ignore
parent = natural_parent = parent.parent[prop.parent]
elif (
insp.is_aliased_class
and insp.with_polymorphic_mappers
and prop.parent in insp.with_polymorphic_mappers
):
subclass_entity: _InternalEntityType[Any] = parent[-1]._entity_for_mapper(prop.parent) # type: ignore # noqa: E501
parent = parent.parent[subclass_entity]
# when building a path where with_polymorphic() is in use,
# special logic to determine the "natural path" when subclass
# entities are used.
#
# here we are trying to distinguish between a path that starts
# on a with_polymorphic entity vs. one that starts on a
# normal entity that introduces a with_polymorphic() in the
# middle using of_type():
#
# # as in test_polymorphic_rel->
# # test_subqueryload_on_subclass_uses_path_correctly
# wp = with_polymorphic(RegularEntity, "*")
# sess.query(wp).options(someload(wp.SomeSubEntity.foos))
#
# vs
#
# # as in test_relationship->JoinedloadWPolyOfTypeContinued
# wp = with_polymorphic(SomeFoo, "*")
# sess.query(RegularEntity).options(
# someload(RegularEntity.foos.of_type(wp))
# .someload(wp.SubFoo.bar)
# )
#
# in the former case, the Query as it generates a path that we
# want to match will be in terms of the with_polymorphic at the
# beginning. in the latter case, Query will generate simple
# paths that don't know about this with_polymorphic, so we must
# use a separate natural path.
#
#
if parent.parent:
natural_parent = parent.parent[subclass_entity.mapper]
self.is_unnatural = True
else:
natural_parent = parent
elif (
natural_parent.parent
and insp.is_aliased_class
and prop.parent # this should always be the case here
is not insp.mapper
and insp.mapper.isa(prop.parent)
):
natural_parent = parent.parent[prop.parent]
self.prop = prop
self.parent = parent
self.path = parent.path + (prop,)
self.natural_path = natural_parent.natural_path + (prop,)
self.has_entity = prop._links_to_entity
if prop._is_relationship:
if TYPE_CHECKING:
assert isinstance(prop, RelationshipProperty)
self.entity = prop.entity
self.mapper = prop.mapper
else:
self.entity = None
self.mapper = None
self._wildcard_path_loader_key = (
"loader",
parent.natural_path + self.prop._wildcard_token,
)
self._default_path_loader_key = self.prop._default_path_loader_key
self._loader_key = ("loader", self.natural_path)
def _truncate_recursive(self) -> _PropRegistry:
earliest = None
for i, token in enumerate(reversed(self.path[:-1])):
if token is self.prop:
earliest = i
if earliest is None:
return self
else:
return self.coerce(self.path[0 : -(earliest + 1)]) # type: ignore
@property
def entity_path(self) -> _AbstractEntityRegistry:
assert self.entity is not None
return self[self.entity]
def _getitem(
self, entity: Union[int, slice, _InternalEntityType[Any]]
) -> Union[_AbstractEntityRegistry, _PathElementType, _PathRepresentation]:
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return _SlotsEntityRegistry(self, entity)
if not TYPE_CHECKING:
__getitem__ = _getitem
| _PropRegistry |
python | doocs__leetcode | solution/0800-0899/0853.Car Fleet/Solution.py | {
"start": 0,
"end": 361
} | class ____:
def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:
idx = sorted(range(len(position)), key=lambda i: position[i])
ans = pre = 0
for i in idx[::-1]:
t = (target - position[i]) / speed[i]
if t > pre:
ans += 1
pre = t
return ans
| Solution |
python | pytorch__pytorch | test/test_fake_tensor.py | {
"start": 94599,
"end": 98065
} | class ____(TestCase):
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_fake_tensor_prefer_device_type(self):
"""
Test that fake_tensor_prefer_device_type configuration works correctly
for device mismatch scenarios.
"""
# Create a custom operation that would normally cause device mismatch
def mixed_device_op(a, b):
# This simulates an operation where 'a' is on MTIA/CUDA but 'b' is created on CPU
cpu_tensor = torch.arange(a.shape[0], device="cpu")
return a + cpu_tensor.unsqueeze(-1)
with FakeTensorMode():
# Test default behavior (should raise error on device mismatch)
cuda_tensor = torch.randn(3, 4, device="cuda")
# Without the config, this should raise a device mismatch error
with self.assertRaisesRegex(
RuntimeError, "Unhandled FakeTensor Device Propagation"
):
mixed_device_op(cuda_tensor, None)
# Test with prefer_device_type set to "cuda"
with torch._functorch.config.patch(fake_tensor_prefer_device_type="cuda"):
with FakeTensorMode():
cuda_tensor = torch.randn(3, 4, device="cuda")
# This should now work and prefer the CUDA device
result = mixed_device_op(cuda_tensor, None)
# The result should be on CUDA device (preferred device type)
self.assertEqual(result.device.type, "cuda")
self.assertEqual(result.shape, (3, 4))
self.assertTrue(isinstance(result, FakeTensor))
# Test that the configuration doesn't affect normal operations
with torch._functorch.config.patch(fake_tensor_prefer_device_type="cuda"):
with FakeTensorMode():
# Normal same-device operations should work as before
x = torch.randn(2, 3, device="cuda")
y = torch.randn(2, 3, device="cuda")
result = x + y
self.assertEqual(result.device.type, "cuda")
# CPU operations should still work
x_cpu = torch.randn(2, 3, device="cpu")
y_cpu = torch.randn(2, 3, device="cpu")
result_cpu = x_cpu + y_cpu
self.assertEqual(result_cpu.device.type, "cpu")
# Test that the configuration is properly scoped
with FakeTensorMode():
cuda_tensor = torch.randn(3, 4, device="cuda")
# After exiting the config context, should raise error again
with self.assertRaisesRegex(
RuntimeError, "Unhandled FakeTensor Device Propagation"
):
mixed_device_op(cuda_tensor, None)
def test_fake_tensor_prefer_device_type_cpu_only(self):
"""
Test that fake_tensor_prefer_device_type works correctly when only CPU tensors are involved.
"""
with torch._functorch.config.patch(fake_tensor_prefer_device_type="cuda"):
with FakeTensorMode():
# When all tensors are CPU, the result should still be CPU
x = torch.randn(2, 3, device="cpu")
y = torch.randn(2, 3, device="cpu")
result = x + y
self.assertEqual(result.device.type, "cpu")
self.assertTrue(isinstance(result, FakeTensor))
if __name__ == "__main__":
run_tests()
| FakeTensorPreferDeviceType |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 23521,
"end": 24605
} | class ____(PreTrainedModel):
# Weight initialization
config: PatchTSMixerConfig
base_model_prefix = "model"
main_input_name = "past_values"
input_modalities = ("time",)
supports_gradient_checkpointing = False
@torch.no_grad()
def _init_weights(self, module):
"""Initialize weights"""
if isinstance(module, PatchTSMixerPositionalEncoding):
# initialize positional encoding
if self.config.positional_encoding_type == "random":
init.normal_(module.position_enc, mean=0.0, std=0.1)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, PatchTSMixerBatchNorm):
init.zeros_(module.batchnorm.bias)
init.ones_(module.batchnorm.weight)
elif isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.init_std)
if module.bias is not None:
init.zeros_(module.bias)
| PatchTSMixerPreTrainedModel |
python | mlflow__mlflow | mlflow/gateway/providers/anthropic.py | {
"start": 659,
"end": 11777
} | class ____(ProviderAdapter):
@classmethod
def chat_to_model(cls, payload, config):
key_mapping = {"stop": "stop_sequences"}
payload["model"] = config.model.name
payload = rename_payload_keys(payload, key_mapping)
if "top_p" in payload and "temperature" in payload:
raise AIGatewayException(
status_code=422, detail="Cannot set both 'temperature' and 'top_p' parameters."
)
max_tokens = payload.get("max_tokens", MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS)
if max_tokens > MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS:
raise AIGatewayException(
status_code=422,
detail="Invalid value for max_tokens: cannot exceed "
f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}.",
)
payload["max_tokens"] = max_tokens
if payload.pop("n", 1) != 1:
raise AIGatewayException(
status_code=422,
detail="'n' must be '1' for the Anthropic provider. Received value: '{n}'.",
)
# Cohere uses `system` to set the system message
# we concatenate all system messages from the user with a newline
if system_messages := [m for m in payload["messages"] if m["role"] == "system"]:
payload["system"] = "\n".join(m["content"] for m in system_messages)
# remaining messages are chat history
# we want to include only user, assistant or tool messages
# Anthropic format of tool related messages example
# https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#tool-use-examples
converted_messages = []
for m in payload["messages"]:
if m["role"] == "user":
converted_messages.append(m)
elif m["role"] == "assistant":
if m.get("tool_calls") is not None:
tool_use_contents = [
{
"type": "tool_use",
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"input": json.loads(tool_call["function"]["arguments"]),
}
for tool_call in m["tool_calls"]
]
m["content"] = tool_use_contents
m.pop("tool_calls")
converted_messages.append(m)
elif m["role"] == "tool":
converted_messages.append(
{
"role": "user",
"content": [
{
"type": "tool_result",
"tool_use_id": m["tool_call_id"],
"content": m["content"],
}
],
}
)
else:
_logger.info(f"Discarded unknown message: {m}")
payload["messages"] = converted_messages
# The range of Anthropic's temperature is 0-1, but ours is 0-2, so we halve it
if "temperature" in payload:
payload["temperature"] = 0.5 * payload["temperature"]
# convert tool definition to Anthropic format
if tools := payload.pop("tools", None):
converted_tools = []
for tool in tools:
if tool["type"] != "function":
raise AIGatewayException(
status_code=422,
detail=(
"Only function calling tool is supported, but received tool type "
f"{tool['type']}"
),
)
tool_function = tool["function"]
converted_tools.append(
{
"name": tool_function["name"],
"description": tool_function["description"],
"input_schema": tool_function["parameters"],
}
)
payload["tools"] = converted_tools
return payload
@classmethod
def model_to_chat(cls, resp, config):
# API reference: https://docs.anthropic.com/en/api/messages#body-messages
#
# Example response:
# ```
# {
# "content": [
# {
# "text": "Blue is often seen as a calming and soothing color.",
# "type": "text"
# },
# {
# "type": "tool_use",
# "id": "toolu_011UYCoc...",
# "name": "get_weather",
# "input": { "city": "Singapore" }
# },
# {
# "source": {
# "type": "base64",
# "media_type": "image/jpeg",
# "data": "/9j/4AAQSkZJRg...",
# "type": "image",
# }
# }
# ],
# "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
# "model": "claude-2.1",
# "role": "assistant",
# "stop_reason": "end_turn",
# "stop_sequence": null,
# "type": "message",
# "usage": {
# "input_tokens": 10,
# "output_tokens": 25
# }
# }
# ```
from mlflow.anthropic.chat import convert_message_to_mlflow_chat
stop_reason = "length" if resp["stop_reason"] == "max_tokens" else "stop"
return chat.ResponsePayload(
id=resp["id"],
created=int(time.time()),
object="chat.completion",
model=resp["model"],
choices=[
chat.Choice(
index=0,
# TODO: Remove this casting once
# https://github.com/mlflow/mlflow/pull/14160 is merged
message=chat.ResponseMessage(
**convert_message_to_mlflow_chat(resp).model_dump()
),
finish_reason=stop_reason,
)
],
usage=chat.ChatUsage(
prompt_tokens=resp["usage"]["input_tokens"],
completion_tokens=resp["usage"]["output_tokens"],
total_tokens=resp["usage"]["input_tokens"] + resp["usage"]["output_tokens"],
),
)
@classmethod
def chat_streaming_to_model(cls, payload, config):
return cls.chat_to_model(payload, config)
@classmethod
def model_to_chat_streaming(cls, resp, config):
content = resp.get("delta") or resp.get("content_block") or {}
if (stop_reason := content.get("stop_reason")) is not None:
stop_reason = "length" if stop_reason == "max_tokens" else "stop"
# example of function calling delta message format:
# https://platform.openai.com/docs/guides/function-calling#streaming
if content.get("type") == "tool_use":
delta = chat.StreamDelta(
tool_calls=[
ToolCallDelta(
index=0,
id=content.get("id"),
type="function",
function=Function(name=content.get("name")),
)
]
)
elif content.get("type") == "input_json_delta":
delta = chat.StreamDelta(
tool_calls=[
ToolCallDelta(index=0, function=Function(arguments=content.get("partial_json")))
]
)
else:
delta = chat.StreamDelta(
role=None,
content=content.get("text"),
)
return chat.StreamResponsePayload(
id=resp["id"],
created=int(time.time()),
model=resp["model"],
choices=[
chat.StreamChoice(
index=resp["index"],
finish_reason=stop_reason,
delta=delta,
)
],
)
@classmethod
def model_to_completions(cls, resp, config):
stop_reason = "stop" if resp["stop_reason"] == "stop_sequence" else "length"
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=resp["model"],
choices=[
completions.Choice(
index=0,
text=resp["completion"],
finish_reason=stop_reason,
)
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
@classmethod
def completions_to_model(cls, payload, config):
key_mapping = {"max_tokens": "max_tokens_to_sample", "stop": "stop_sequences"}
payload["model"] = config.model.name
if "top_p" in payload:
raise AIGatewayException(
status_code=422,
detail="Cannot set both 'temperature' and 'top_p' parameters. "
"Please use only the temperature parameter for your query.",
)
max_tokens = payload.get("max_tokens", MLFLOW_AI_GATEWAY_ANTHROPIC_DEFAULT_MAX_TOKENS)
if max_tokens > MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS:
raise AIGatewayException(
status_code=422,
detail="Invalid value for max_tokens: cannot exceed "
f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}.",
)
payload["max_tokens"] = max_tokens
if payload.get("stream", False):
raise AIGatewayException(
status_code=422,
detail="Setting the 'stream' parameter to 'true' is not supported with the MLflow "
"Gateway.",
)
n = payload.pop("n", 1)
if n != 1:
raise AIGatewayException(
status_code=422,
detail=f"'n' must be '1' for the Anthropic provider. Received value: '{n}'.",
)
payload = rename_payload_keys(payload, key_mapping)
if payload["prompt"].startswith("Human: "):
payload["prompt"] = "\n\n" + payload["prompt"]
if not payload["prompt"].startswith("\n\nHuman: "):
payload["prompt"] = "\n\nHuman: " + payload["prompt"]
if not payload["prompt"].endswith("\n\nAssistant:"):
payload["prompt"] = payload["prompt"] + "\n\nAssistant:"
# The range of Anthropic's temperature is 0-1, but ours is 0-2, so we halve it
if "temperature" in payload:
payload["temperature"] = 0.5 * payload["temperature"]
return payload
@classmethod
def embeddings_to_model(cls, payload, config):
raise NotImplementedError
@classmethod
def model_to_embeddings(cls, resp, config):
raise NotImplementedError
| AnthropicAdapter |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 103906,
"end": 104886
} | class ____(_ShapeGuardPrinter, CppPrinter):
def __init__(self, *args: Any) -> None:
self.all_symbols: set[str] = set()
self.source_to_symbol: dict[Source, sympy.Symbol] = {}
super().__init__(*args)
def print_source(self, source: Source) -> str:
if source in self.source_to_symbol:
return self.source_to_symbol[source].name
source_name = source.name()
mangled_name = re.sub("[^0-9a-zA-Z_]+", "_", source_name)
old_mangled_name = mangled_name
count = 0
while mangled_name in self.all_symbols:
mangled_name = f"{old_mangled_name}_{count}"
count += 1
self.source_to_symbol[source] = sympy.Symbol(mangled_name)
self.all_symbols.add(mangled_name)
return mangled_name
def doprint(self, expr: sympy.Expr) -> str:
return CppPrinter.doprint(self, expr)
# A dataclass for storing shape guards
@dataclass(frozen=True)
| _ShapeGuardCppPrinter |
python | keras-team__keras | keras/src/utils/sequence_utils_test.py | {
"start": 75,
"end": 4558
} | class ____(testing.TestCase):
def test_pad_sequences(self):
a = [[1], [1, 2], [1, 2, 3]]
# test padding
b = sequence_utils.pad_sequences(a, maxlen=3, padding="pre")
self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
b = sequence_utils.pad_sequences(a, maxlen=3, padding="post")
self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
# test truncating
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="pre")
self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="post")
self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])
# test value
b = sequence_utils.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
def test_pad_sequences_float(self):
a = [[1.2], [1.2, 2.3], [1.2, 2.3, 3.4]]
# test padding
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="pre", dtype="float32"
)
self.assertAllClose(b, [[0, 0, 1.2], [0, 1.2, 2.3], [1.2, 2.3, 3.4]])
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="post", dtype="float32"
)
self.assertAllClose(b, [[1.2, 0, 0], [1.2, 2.3, 0], [1.2, 2.3, 3.4]])
# test truncating
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="pre", dtype="float32"
)
self.assertAllClose(b, [[0, 1.2], [1.2, 2.3], [2.3, 3.4]])
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="post", dtype="float32"
)
self.assertAllClose(b, [[0, 1.2], [1.2, 2.3], [1.2, 2.3]])
# test value
b = sequence_utils.pad_sequences(a, maxlen=3, value=1, dtype="float32")
self.assertAllClose(b, [[1, 1, 1.2], [1, 1.2, 2.3], [1.2, 2.3, 3.4]])
def test_pad_sequences_str(self):
a = [["1"], ["1", "2"], ["1", "2", "3"]]
# test padding
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="pre", value="pad", dtype=object
)
self.assertAllEqual(
b, [["pad", "pad", "1"], ["pad", "1", "2"], ["1", "2", "3"]]
)
b = sequence_utils.pad_sequences(
a, maxlen=3, padding="post", value="pad", dtype="<U3"
)
self.assertAllEqual(
b, [["1", "pad", "pad"], ["1", "2", "pad"], ["1", "2", "3"]]
)
# test truncating
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="pre", value="pad", dtype=object
)
self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["2", "3"]])
b = sequence_utils.pad_sequences(
a, maxlen=2, truncating="post", value="pad", dtype="<U3"
)
self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["1", "2"]])
with self.assertRaisesRegex(
ValueError, "`dtype` int32 is not compatible with "
):
sequence_utils.pad_sequences(
a, maxlen=2, truncating="post", value="pad"
)
def test_pad_sequences_vector(self):
a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
# test padding
b = sequence_utils.pad_sequences(a, maxlen=3, padding="pre")
self.assertAllClose(
b,
[
[[0, 0], [0, 0], [1, 1]],
[[0, 0], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]],
],
)
b = sequence_utils.pad_sequences(a, maxlen=3, padding="post")
self.assertAllClose(
b,
[
[[1, 1], [0, 0], [0, 0]],
[[2, 1], [2, 2], [0, 0]],
[[3, 1], [3, 2], [3, 3]],
],
)
# test truncating
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="pre")
self.assertAllClose(
b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3, 3]]]
)
b = sequence_utils.pad_sequences(a, maxlen=2, truncating="post")
self.assertAllClose(
b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2]]]
)
# test value
b = sequence_utils.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(
b,
[
[[1, 1], [1, 1], [1, 1]],
[[1, 1], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]],
],
)
| PadSequencesTest |
python | tornadoweb__tornado | tornado/test/routing_test.py | {
"start": 4997,
"end": 5666
} | class ____(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def finish(self):
response_body = b"OK"
self.connection.write_headers(
ResponseStartLine("HTTP/1.1", 200, "OK"),
HTTPHeaders({"Content-Length": str(len(response_body))}),
)
self.connection.write(response_body)
self.connection.finish()
return MessageDelegate(request_conn)
| ConnectionDelegate |
python | scrapy__scrapy | tests/AsyncCrawlerProcess/asyncio_enabled_no_reactor.py | {
"start": 125,
"end": 313
} | class ____:
def __init__(self):
if not is_asyncio_reactor_installed():
raise RuntimeError("ReactorCheckExtension requires the asyncio reactor.")
| ReactorCheckExtension |
python | celery__celery | t/unit/utils/test_functional.py | {
"start": 8567,
"end": 10884
} | class ____:
def test_from_cls(self):
class X:
def __call__(x, y, kwarg=1):
pass
g = head_from_fun(X())
with pytest.raises(TypeError):
g(1)
g(1, 2)
g(1, 2, kwarg=3)
def test_from_fun(self):
def f(x, y, kwarg=1):
pass
g = head_from_fun(f)
with pytest.raises(TypeError):
g(1)
g(1, 2)
g(1, 2, kwarg=3)
def test_regression_3678(self):
local = {}
fun = ('def f(foo, *args, bar="", **kwargs):'
' return foo, args, bar')
exec(fun, {}, local)
g = head_from_fun(local['f'])
g(1)
g(1, 2, 3, 4, bar=100)
with pytest.raises(TypeError):
g(bar=100)
def test_from_fun_with_hints(self):
local = {}
fun = ('def f_hints(x: int, y: int, kwarg: int=1):'
' pass')
exec(fun, {}, local)
f_hints = local['f_hints']
g = head_from_fun(f_hints)
with pytest.raises(TypeError):
g(1)
g(1, 2)
g(1, 2, kwarg=3)
def test_from_fun_forced_kwargs(self):
local = {}
fun = ('def f_kwargs(*, a, b="b", c=None):'
' return')
exec(fun, {}, local)
f_kwargs = local['f_kwargs']
g = head_from_fun(f_kwargs)
with pytest.raises(TypeError):
g(1)
g(a=1)
g(a=1, b=2)
g(a=1, b=2, c=3)
def test_classmethod(self):
class A:
@classmethod
def f(cls, x):
return x
fun = head_from_fun(A.f, bound=False)
assert fun(A, 1) == 1
fun = head_from_fun(A.f, bound=True)
assert fun(1) == 1
def test_kwonly_required_args(self):
local = {}
fun = ('def f_kwargs_required(*, a="a", b, c=None):'
' return')
exec(fun, {}, local)
f_kwargs_required = local['f_kwargs_required']
g = head_from_fun(f_kwargs_required)
with pytest.raises(TypeError):
g(1)
with pytest.raises(TypeError):
g(a=1)
with pytest.raises(TypeError):
g(c=1)
with pytest.raises(TypeError):
g(a=2, c=1)
g(b=3)
| test_head_from_fun |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_update_delete.py | {
"start": 460,
"end": 4008
} | class ____(fixtures.TablesTest):
run_deletes = "each"
__requires__ = ("sane_rowcount",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"plain_pk",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.plain_pk.insert(),
[
{"id": 1, "data": "d1"},
{"id": 2, "data": "d2"},
{"id": 3, "data": "d3"},
],
)
def test_update(self, connection):
t = self.tables.plain_pk
r = connection.execute(
t.update().where(t.c.id == 2), dict(data="d2_new")
)
assert not r.is_insert
assert not r.returns_rows
assert r.rowcount == 1
eq_(
connection.execute(t.select().order_by(t.c.id)).fetchall(),
[(1, "d1"), (2, "d2_new"), (3, "d3")],
)
def test_delete(self, connection):
t = self.tables.plain_pk
r = connection.execute(t.delete().where(t.c.id == 2))
assert not r.is_insert
assert not r.returns_rows
assert r.rowcount == 1
eq_(
connection.execute(t.select().order_by(t.c.id)).fetchall(),
[(1, "d1"), (3, "d3")],
)
@testing.variation("criteria", ["rows", "norows", "emptyin"])
@testing.requires.update_returning
def test_update_returning(self, connection, criteria):
t = self.tables.plain_pk
stmt = t.update().returning(t.c.id, t.c.data)
if criteria.norows:
stmt = stmt.where(t.c.id == 10)
elif criteria.rows:
stmt = stmt.where(t.c.id == 2)
elif criteria.emptyin:
stmt = stmt.where(t.c.id.in_([]))
else:
criteria.fail()
r = connection.execute(stmt, dict(data="d2_new"))
assert not r.is_insert
assert r.returns_rows
eq_(r.keys(), ["id", "data"])
if criteria.rows:
eq_(r.all(), [(2, "d2_new")])
else:
eq_(r.all(), [])
eq_(
connection.execute(t.select().order_by(t.c.id)).fetchall(),
(
[(1, "d1"), (2, "d2_new"), (3, "d3")]
if criteria.rows
else [(1, "d1"), (2, "d2"), (3, "d3")]
),
)
@testing.variation("criteria", ["rows", "norows", "emptyin"])
@testing.requires.delete_returning
def test_delete_returning(self, connection, criteria):
t = self.tables.plain_pk
stmt = t.delete().returning(t.c.id, t.c.data)
if criteria.norows:
stmt = stmt.where(t.c.id == 10)
elif criteria.rows:
stmt = stmt.where(t.c.id == 2)
elif criteria.emptyin:
stmt = stmt.where(t.c.id.in_([]))
else:
criteria.fail()
r = connection.execute(stmt)
assert not r.is_insert
assert r.returns_rows
eq_(r.keys(), ["id", "data"])
if criteria.rows:
eq_(r.all(), [(2, "d2")])
else:
eq_(r.all(), [])
eq_(
connection.execute(t.select().order_by(t.c.id)).fetchall(),
(
[(1, "d1"), (3, "d3")]
if criteria.rows
else [(1, "d1"), (2, "d2"), (3, "d3")]
),
)
__all__ = ("SimpleUpdateDeleteTest",)
| SimpleUpdateDeleteTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image33.py | {
"start": 315,
"end": 1051
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image33.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("D:D", 3.86)
worksheet.set_column("E:E", 1.43)
worksheet.set_row(7, 7.5)
worksheet.set_row(8, 9.75)
worksheet.insert_image(
"E9", self.image_dir + "red.png", {"x_offset": -2, "y_offset": -1}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py | {
"start": 1241,
"end": 11547
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
DOCSTRING_PARAMS = [[['000', '001'], ['010']],
[['100'], ['110', '111', '112'], ['120']],
[[], ['210']]] # pyformat: disable
@parameterized.parameters([
#=========================================================================
# Docstring Examples
#=========================================================================
dict(
descr='Docstring example 1',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[2], [0]],
expected=ragged_factory_ops.constant_value(
[[[], [b'210']], [[b'000', b'001'], [b'010']]])),
dict(
descr='Docstring example 2',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[2, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[b'210'], [b'000', b'001']])),
dict(
descr='Docstring example 3',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[0, 0, 1], [1, 1, 2]],
expected=[b'001', b'112']),
#=========================================================================
# Indices with 0 values (selects the entire params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [0], result: [B1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[b'a', b'b', b'c'], [b'd']])),
dict(
descr='params: [B1, (B2)], indices: [A1, 0], result: [A1, B1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([3, 0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']]])),
dict(
descr=('params: [B1, (B2)], indices: [A1, A2, 0], '
'result: [A1, A2, B1, (B2)]'),
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([1, 3, 0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']]]])),
dict(
descr='params: [B1], indices: [A1, (A2), 0], result: [A1, (A2), B1]',
params=['a'],
indices=ragged_factory_ops.constant_value(
[[[], []], [[]]],
ragged_rank=1,
dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[b'a'], [b'a']], [[b'a']]],
ragged_rank=1)),
#=========================================================================
# Indices with 1 value (selects row from params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [A1, 1], result: [A1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=[[1], [0]],
expected=ragged_factory_ops.constant_value(
[[b'd'], [b'a', b'b', b'c']])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 1], '
'result: [A1, (B2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1], [1]],
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f']], [[b'e', b'f']]])),
dict(
descr=('params: [B1, B2, B3], indices: [A1, (A2), 1], '
'result: [A1, (A2), B2, B3]'),
params=[[['a']], [['b']]],
indices=ragged_factory_ops.constant_value([[[0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[[b'a']]]], ragged_rank=1)),
#=========================================================================
# Indices with 2 values (selects row & col from params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [A1, 2], result: [A1]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=[[1, 0], [0, 0], [0, 2]],
expected=ragged_factory_ops.constant_value([b'd', b'a', b'c'])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 2], '
'result: [A1, (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1, 0], [0, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, A2, 2], '
'result: [A1, (A2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[[1, 0], [0, 1], [0, 0]]],
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']]])),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, A2, 2], '
'result: [A1, A2, B3]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']],
[['e', 'f']]],
ragged_rank=1),
indices=[[[1, 0], [0, 1], [0, 0]]],
expected=[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, A2, A3, 2], '
'result: [A1, A2, A3, B3]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']],
[['e', 'f']]],
ragged_rank=1),
indices=[[[[1, 0], [0, 1], [0, 0]]]],
expected=[[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]]),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, (A2), 2], '
'result: [A1, (A2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=ragged_factory_ops.constant_value(
[[[1, 0], [0, 1]], [[0, 0]]],
ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f'], [b'd']], [[b'a', b'b', b'c']]])),
#=========================================================================
# Indices with 3 values
#=========================================================================
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 3], '
'result: [A1]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1, 0, 1], [0, 0, 0], [0, 1, 0]],
expected=[b'f', b'a', b'd']),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, 3], '
'result: [A1]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
ragged_rank=1),
indices=[[1, 0, 1], [0, 0, 0], [0, 1, 1]],
expected=[b'f', b'a', b'd']),
dict(
descr=('params: [B1, (B2), (B3), B4], indices: [A1, 3], '
'result: [A1, B4]'),
params=ragged_factory_ops.constant_value(
[[[['a', 'b'], ['c', 'd']], [['e', 'f']]]],
ragged_rank=2),
indices=[[0, 0, 1], [0, 0, 0], [0, 1, 0]],
expected=[[b'c', b'd'], [b'a', b'b'], [b'e', b'f']]),
dict(
descr=('Pass through bad_indices_policy for non ragged params+indices'),
params=[1, 3, 5, 7],
indices=[[3], [999], [1], [0]],
expected=[7, 0, 3, 1],
bad_indices_policy='IGNORE'),
]) # pyformat: disable
def testRaggedGatherNd(
self, descr, params, indices, expected, bad_indices_policy=''
):
result = ragged_gather_ops.gather_nd(
params, indices, bad_indices_policy=bad_indices_policy
)
self.assertAllEqual(result, expected)
def testRaggedGatherNdUnknownRankError(self):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd']])
indices1 = array_ops.placeholder(dtypes.int32, shape=None)
indices2 = array_ops.placeholder(dtypes.int32, shape=[None])
with self.assertRaisesRegex(ValueError,
'indices.rank be statically known.'):
ragged_gather_ops.gather_nd(params, indices1)
with self.assertRaisesRegex(
ValueError, r'indices.shape\[-1\] must be statically known.'):
ragged_gather_ops.gather_nd(params, indices2)
@parameterized.parameters([
dict(
params=['a'],
indices=0,
error=(ValueError, errors.InvalidArgumentError),
),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=0,
message='indices.rank must be at least 1.',
),
dict(
params=['a', 'b', 'c'],
indices=ragged_factory_ops.constant_value([[0]]),
message='The innermost dimension of indices may not be ragged',
),
dict(
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([1, 3, 0], dtype=np.int32),
bad_indices_policy='IGNORE',
message=(
'non-default bad_indices_policy not supported for ragged gather'
),
),
])
def testRaggedGatherNdStaticError(
self,
params,
indices,
bad_indices_policy='',
message=None,
error=ValueError,
):
with self.assertRaisesRegex(error, message):
ragged_gather_ops.gather_nd(
params, indices, bad_indices_policy=bad_indices_policy
)
if __name__ == '__main__':
googletest.main()
| RaggedGatherNdOpTest |
python | pyinstaller__pyinstaller | bootloader/waflib/Build.py | {
"start": 26353,
"end": 27400
} | class ____(BuildContext):
'''lists the targets to execute'''
cmd = 'list'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
self.timer = Utils.Timer()
for g in self.groups:
for tg in g:
try:
f = tg.post
except AttributeError:
pass
else:
f()
try:
self.get_tgen_by_name('')
except Errors.WafError:
pass
targets = sorted(self.task_gen_cache_names)
line_just = max(len(t) for t in targets) if targets else 0
for target in targets:
tgen = self.task_gen_cache_names[target]
descript = getattr(tgen, 'description', '')
if descript:
target = target.ljust(line_just)
descript = ': %s' % descript
Logs.pprint('GREEN', target, label=descript)
| ListContext |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 23075,
"end": 23968
} | class ____(torch.nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
**kwargs,
)
def forward(self, x):
if x.numel() > 0:
return super().forward(x)
output_shape = [
((i - 1) * d - 2 * p + (di * (k - 1) + 1) + op)
for i, p, di, k, d, op in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride,
self.output_padding,
)
]
output_shape = [x.shape[0], self.bias.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape) # noqa: F821
| ConvTransposeCallSuperForwardDirectly |
python | django__django | django/forms/fields.py | {
"start": 43944,
"end": 45624
} | class ____(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
"invalid_date": _("Enter a valid date."),
"invalid_time": _("Enter a valid time."),
}
def __init__(self, *, input_date_formats=None, input_time_formats=None, **kwargs):
errors = self.default_error_messages.copy()
if "error_messages" in kwargs:
errors.update(kwargs["error_messages"])
localize = kwargs.get("localize", False)
fields = (
DateField(
input_formats=input_date_formats,
error_messages={"invalid": errors["invalid_date"]},
localize=localize,
),
TimeField(
input_formats=input_time_formats,
error_messages={"invalid": errors["invalid_time"]},
localize=localize,
),
)
super().__init__(fields, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(
self.error_messages["invalid_date"], code="invalid_date"
)
if data_list[1] in self.empty_values:
raise ValidationError(
self.error_messages["invalid_time"], code="invalid_time"
)
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
| SplitDateTimeField |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_events_reader.py | {
"start": 12209,
"end": 13105
} | class ____:
"""Base class for digest.
Properties:
wall_time: A timestamp for the digest as a `float` (unit: s).
locator: A datum that allows tracng the digest to its original
location. It can be either of the two:
1. Bytes offset from the beginning of the file as a single integer,
for the case of all digests of the same kind coming from the same
file.
2. A tuple of a file index and a byte offset. This applies to case
in which the same type of debugger data may come from multiple files,
e.g., graph execution traces.
"""
def __init__(self, wall_time, locator):
self._wall_time = wall_time
self._locator = locator
@property
def wall_time(self):
return self._wall_time
@property
def locator(self):
return self._locator
def to_json(self):
return {"wall_time": self.wall_time}
| BaseDigest |
python | run-llama__llama_index | llama-index-core/llama_index/core/ingestion/data_sinks.py | {
"start": 236,
"end": 582
} | class ____(BaseModel):
"""
A class containing metadata for a type of data sink.
"""
name: str = Field(
description="Unique and human-readable name for the type of data sink"
)
component_type: Type[BasePydanticVectorStore] = Field(
description="Type of component that implements the data sink"
)
| DataSink |
python | doocs__leetcode | solution/2500-2599/2595.Number of Even and Odd Bits/Solution.py | {
"start": 0,
"end": 201
} | class ____:
def evenOddBit(self, n: int) -> List[int]:
ans = [0, 0]
i = 0
while n:
ans[i] += n & 1
i ^= 1
n >>= 1
return ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/oauth/migrations/0001_initial.py | {
"start": 133,
"end": 9369
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="BitbucketProject",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"pub_date",
models.DateTimeField(auto_now_add=True, verbose_name="Publication date"),
),
(
"modified_date",
models.DateTimeField(auto_now=True, verbose_name="Modified date"),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
(
"full_name",
models.CharField(unique=True, max_length=255, verbose_name="Full Name"),
),
(
"description",
models.TextField(
help_text="The reStructuredText description of the project",
null=True,
verbose_name="Description",
blank=True,
),
),
(
"vcs",
models.CharField(max_length=200, verbose_name="vcs", blank=True),
),
(
"git_url",
models.CharField(max_length=200, verbose_name="Git URL", blank=True),
),
(
"ssh_url",
models.CharField(max_length=200, verbose_name="SSH URL", blank=True),
),
(
"html_url",
models.URLField(null=True, verbose_name="HTML URL", blank=True),
),
("active", models.BooleanField(default=False, verbose_name="Active")),
("json", models.TextField(verbose_name=b"JSON")),
],
),
migrations.CreateModel(
name="BitbucketTeam",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"pub_date",
models.DateTimeField(auto_now_add=True, verbose_name="Publication date"),
),
(
"modified_date",
models.DateTimeField(auto_now=True, verbose_name="Modified date"),
),
(
"login",
models.CharField(unique=True, max_length=255, verbose_name="Login"),
),
(
"email",
models.EmailField(max_length=255, null=True, verbose_name="Email", blank=True),
),
(
"name",
models.CharField(max_length=255, null=True, verbose_name="Name", blank=True),
),
(
"html_url",
models.URLField(null=True, verbose_name="HTML URL", blank=True),
),
("active", models.BooleanField(default=False, verbose_name="Active")),
("json", models.TextField(verbose_name=b"JSON")),
(
"users",
models.ManyToManyField(
related_name="bitbucket_organizations",
verbose_name="Users",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="GithubOrganization",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"pub_date",
models.DateTimeField(auto_now_add=True, verbose_name="Publication date"),
),
(
"modified_date",
models.DateTimeField(auto_now=True, verbose_name="Modified date"),
),
(
"login",
models.CharField(unique=True, max_length=255, verbose_name="Login"),
),
(
"email",
models.EmailField(max_length=255, null=True, verbose_name="Email", blank=True),
),
(
"name",
models.CharField(max_length=255, null=True, verbose_name="Name", blank=True),
),
(
"html_url",
models.URLField(null=True, verbose_name="HTML URL", blank=True),
),
("active", models.BooleanField(default=False, verbose_name="Active")),
("json", models.TextField(verbose_name=b"JSON")),
(
"users",
models.ManyToManyField(
related_name="github_organizations",
verbose_name="Users",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="GithubProject",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"pub_date",
models.DateTimeField(auto_now_add=True, verbose_name="Publication date"),
),
(
"modified_date",
models.DateTimeField(auto_now=True, verbose_name="Modified date"),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
(
"full_name",
models.CharField(max_length=255, verbose_name="Full Name"),
),
(
"description",
models.TextField(
help_text="The reStructuredText description of the project",
null=True,
verbose_name="Description",
blank=True,
),
),
(
"git_url",
models.CharField(max_length=200, verbose_name="Git URL", blank=True),
),
(
"ssh_url",
models.CharField(max_length=200, verbose_name="SSH URL", blank=True),
),
(
"html_url",
models.URLField(null=True, verbose_name="HTML URL", blank=True),
),
("active", models.BooleanField(default=False, verbose_name="Active")),
("json", models.TextField(verbose_name=b"JSON")),
(
"organization",
models.ForeignKey(
related_name="projects",
verbose_name="Organization",
blank=True,
to="oauth.GithubOrganization",
null=True,
on_delete=models.CASCADE,
),
),
(
"users",
models.ManyToManyField(
related_name="github_projects",
verbose_name="Users",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AddField(
model_name="bitbucketproject",
name="organization",
field=models.ForeignKey(
related_name="projects",
verbose_name="Organization",
blank=True,
to="oauth.BitbucketTeam",
null=True,
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name="bitbucketproject",
name="users",
field=models.ManyToManyField(
related_name="bitbucket_projects",
verbose_name="Users",
to=settings.AUTH_USER_MODEL,
),
),
]
| Migration |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/corge/package.py | {
"start": 1304,
"end": 6865
} | class ____
{
private:
static const int version_major;
static const int version_minor;
public:
Corge();
int get_version() const;
int corgegate() const;
};
#endif // CORGE_H_
"""
corge_version_h = """
const int corge_version_major = %s;
const int corge_version_minor = %s;
"""
corgegator_cc = """
#include <iostream>
#include "corge.h"
int
main(int argc, char* argv[])
{
std::cout << "corgerator called with ";
if (argc == 0) {
std::cout << "no command-line arguments" << std::endl;
} else {
std::cout << "command-line arguments:";
for (int i = 0; i < argc; ++i) {
std::cout << " \"" << argv[i] << "\"";
}
std::cout << std::endl;
}
std::cout << "corgegating.."<<std::endl;
Corge corge;
corge.corgegate();
std::cout << "done."<<std::endl;
return 0;
}
"""
mkdirp("%s/corge" % prefix.include)
mkdirp("%s/corge" % self.stage.source_path)
with open("%s/corge_version.h" % self.stage.source_path, "w", encoding="utf-8") as f:
f.write(corge_version_h % (self.version[0], self.version[1:]))
with open("%s/corge/corge.cc" % self.stage.source_path, "w", encoding="utf-8") as f:
f.write(corge_cc % prefix.config)
with open("%s/corge/corge.h" % self.stage.source_path, "w", encoding="utf-8") as f:
f.write(corge_h)
with open("%s/corge/corgegator.cc" % self.stage.source_path, "w", encoding="utf-8") as f:
f.write(corgegator_cc)
gpp = which("g++")
if sys.platform == "darwin":
gpp = which("clang++")
gpp(
"-Dcorge_EXPORTS",
"-I%s" % self.stage.source_path,
"-I%s" % spec["quux"].prefix.include,
"-I%s" % spec["garply"].prefix.include,
"-O2",
"-g",
"-DNDEBUG",
"-fPIC",
"-o",
"corge.cc.o",
"-c",
"corge/corge.cc",
)
gpp(
"-Dcorge_EXPORTS",
"-I%s" % self.stage.source_path,
"-I%s" % spec["quux"].prefix.include,
"-I%s" % spec["garply"].prefix.include,
"-O2",
"-g",
"-DNDEBUG",
"-fPIC",
"-o",
"corgegator.cc.o",
"-c",
"corge/corgegator.cc",
)
if sys.platform == "darwin":
gpp(
"-fPIC",
"-O2",
"-g",
"-DNDEBUG",
"-dynamiclib",
"-install_name",
"@rpath/libcorge.dylib",
"-o",
"libcorge.dylib",
"corge.cc.o",
"-Wl,-rpath,%s" % spec["quux"].prefix.lib64,
"-Wl,-rpath,%s" % spec["garply"].prefix.lib64,
"%s/libquux.dylib" % spec["quux"].prefix.lib64,
"%s/libgarply.dylib" % spec["garply"].prefix.lib64,
)
gpp(
"-O2",
"-g",
"-DNDEBUG",
"-rdynamic",
"corgegator.cc.o",
"-o",
"corgegator",
"-Wl,-rpath,%s" % prefix.lib64,
"-Wl,-rpath,%s" % spec["quux"].prefix.lib64,
"-Wl,-rpath,%s" % spec["garply"].prefix.lib64,
"libcorge.dylib",
"%s/libquux.dylib.3.0" % spec["quux"].prefix.lib64,
"%s/libgarply.dylib.3.0" % spec["garply"].prefix.lib64,
)
mkdirp(prefix.lib64)
copy("libcorge.dylib", "%s/libcorge.dylib" % prefix.lib64)
os.link("%s/libcorge.dylib" % prefix.lib64, "%s/libcorge.dylib.3.0" % prefix.lib64)
else:
gpp(
"-fPIC",
"-O2",
"-g",
"-DNDEBUG",
"-shared",
"-Wl,-soname,libcorge.so",
"-o",
"libcorge.so",
"corge.cc.o",
"-Wl,-rpath,%s:%s::::" % (spec["quux"].prefix.lib64, spec["garply"].prefix.lib64),
"%s/libquux.so" % spec["quux"].prefix.lib64,
"%s/libgarply.so" % spec["garply"].prefix.lib64,
)
gpp(
"-O2",
"-g",
"-DNDEBUG",
"-rdynamic",
"corgegator.cc.o",
"-o",
"corgegator",
"-Wl,-rpath,%s" % prefix.lib64,
"-Wl,-rpath,%s" % spec["quux"].prefix.lib64,
"-Wl,-rpath,%s" % spec["garply"].prefix.lib64,
"libcorge.so",
"%s/libquux.so.3.0" % spec["quux"].prefix.lib64,
"%s/libgarply.so.3.0" % spec["garply"].prefix.lib64,
)
mkdirp(prefix.lib64)
copy("libcorge.so", "%s/libcorge.so" % prefix.lib64)
os.link("%s/libcorge.so" % prefix.lib64, "%s/libcorge.so.3.0" % prefix.lib64)
copy("corgegator", "%s/corgegator" % prefix.lib64)
copy("%s/corge/corge.h" % self.stage.source_path, "%s/corge/corge.h" % prefix.include)
mkdirp(prefix.bin)
copy("corge_version.h", "%s/corge_version.h" % prefix.bin)
os.symlink("%s/corgegator" % prefix.lib64, "%s/corgegator" % prefix.bin)
os.symlink("%s/quuxifier" % spec["quux"].prefix.lib64, "%s/quuxifier" % prefix.bin)
os.symlink("%s/garplinator" % spec["garply"].prefix.lib64, "%s/garplinator" % prefix.bin)
| Corge |