language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | great-expectations__great_expectations | tests/core/test_expectation_suite.py | {
"start": 49382,
"end": 55773
} | class ____:
@pytest.mark.unit
def test_hash_consistency_with_equality(self, empty_data_context):
expectation1 = ExpectColumnValuesToNotBeNull(column="test_column")
expectation2 = ExpectColumnValuesToNotBeNull(column="test_column")
suite1 = ExpectationSuite(name="test_suite")
suite1.add_expectation(expectation1)
suite2 = ExpectationSuite(name="test_suite")
suite2.add_expectation(expectation2)
assert suite1 == suite2
assert hash(suite1) == hash(suite2)
@pytest.mark.unit
def test_hash_different_for_different_names(self, empty_data_context):
expectation = ExpectColumnValuesToNotBeNull(column="test_column")
suite1 = ExpectationSuite(name="test_suite_1")
suite1.add_expectation(expectation)
suite2 = ExpectationSuite(name="test_suite_2")
suite2.add_expectation(expectation)
assert suite1 != suite2
assert hash(suite1) != hash(suite2)
@pytest.mark.unit
def test_hash_different_for_different_expectations(self, empty_data_context):
expectation1 = ExpectColumnValuesToNotBeNull(column="test_column_1")
expectation2 = ExpectColumnValuesToNotBeNull(column="test_column_2")
suite1 = ExpectationSuite(name="test_suite")
suite1.add_expectation(expectation1)
suite2 = ExpectationSuite(name="test_suite")
suite2.add_expectation(expectation2)
assert suite1 != suite2
assert hash(suite1) != hash(suite2)
@pytest.mark.unit
def test_hash_different_for_different_meta(self, empty_data_context):
expectation = ExpectColumnValuesToNotBeNull(column="test_column")
suite1 = ExpectationSuite(name="test_suite", meta={"test": "value1"})
suite1.add_expectation(expectation)
suite2 = ExpectationSuite(name="test_suite", meta={"test": "value2"})
suite2.add_expectation(expectation)
assert suite1 != suite2
assert hash(suite1) != hash(suite2)
@pytest.mark.unit
def test_hash_different_for_different_suite_parameters(self, empty_data_context):
expectation = ExpectColumnValuesToNotBeNull(column="test_column")
suite1 = ExpectationSuite(name="test_suite", suite_parameters={"param1": "value1"})
suite1.add_expectation(expectation)
suite2 = ExpectationSuite(name="test_suite", suite_parameters={"param1": "value2"})
suite2.add_expectation(expectation)
assert suite1 != suite2
assert hash(suite1) != hash(suite2)
@pytest.mark.unit
def test_hash_stable_across_runs(self, empty_data_context):
expectation = ExpectColumnValuesToNotBeNull(column="test_column")
suite = ExpectationSuite(name="test_suite")
suite.add_expectation(expectation)
hash1 = hash(suite)
hash2 = hash(suite)
hash3 = hash(suite)
assert hash1 == hash2 == hash3
@pytest.mark.unit
def test_expectation_suite_severity_functionality():
"""Test that severity is properly handled in ExpectationSuite operations."""
from great_expectations.expectations.metadata_types import FailureSeverity
# Create a suite with expectations that have different severities
suite = ExpectationSuite(name="test_suite")
# Add expectation configuration with default severity
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column_1"},
)
suite.add_expectation_configuration(config1)
assert suite.expectations[0].severity == FailureSeverity.CRITICAL
# Add expectation configuration with custom severity
config2 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column_2"},
severity=FailureSeverity.WARNING,
)
suite.add_expectation_configuration(config2)
assert suite.expectations[1].severity == FailureSeverity.WARNING
# Test that severity is preserved when accessing expectations
assert suite.expectations[0].severity == FailureSeverity.CRITICAL
assert suite.expectations[1].severity == FailureSeverity.WARNING
# Test that severity is included in serialization
suite_dict = suite.to_json_dict()
assert "expectations" in suite_dict
assert len(suite_dict["expectations"]) == 2
assert suite_dict["expectations"][0]["severity"] == "critical"
assert suite_dict["expectations"][1]["severity"] == "warning"
# Test that severity is preserved when modifying expectations
suite.expectations[0].severity = FailureSeverity.INFO
assert suite.expectations[0].severity == FailureSeverity.INFO
# Note: Current implementation doesn't include severity in equality comparison
# so expectations with same type/kwargs but different severity are considered equal
suite2 = ExpectationSuite(name="test_suite")
config1_copy = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column_1"},
severity=FailureSeverity.INFO,
)
suite2.add_expectation_configuration(config1_copy)
# Should be equal because severity is not considered in equality
assert suite.expectations[0] == suite2.expectations[0]
assert hash(suite.expectations[0]) == hash(suite2.expectations[0])
@pytest.mark.unit
def test_expectation_suite_severity_in_configuration():
"""Test that severity is properly handled in ExpectationConfiguration within suites."""
from great_expectations.expectations.metadata_types import FailureSeverity
# Create expectation configuration with custom severity
config = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity=FailureSeverity.WARNING,
)
# Add to suite
suite = ExpectationSuite(name="test_suite")
suite.add_expectation_configuration(config)
# Verify severity is preserved
assert suite.expectations[0].severity == FailureSeverity.WARNING
# Test that configuration property preserves severity
expectation_config = suite.expectations[0].configuration
assert expectation_config.severity == FailureSeverity.WARNING
# Test that to_domain_obj preserves severity
domain_obj = config.to_domain_obj()
assert domain_obj.severity == FailureSeverity.WARNING
| TestExpectationSuiteHash |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 355701,
"end": 356349
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("IpAllowListEntryEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("IpAllowListEntry"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| IpAllowListEntryConnection |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_flows.py | {
"start": 247,
"end": 2508
} | class ____:
async def test_create_flow(self, session, client):
flow_data = {"name": "my-flow", "labels": {"env": "dev"}}
response = await client.post("/flows/", json=flow_data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["name"] == "my-flow"
flow_id = response.json()["id"]
flow = await models.flows.read_flow(session=session, flow_id=flow_id)
assert flow
assert str(flow.id) == flow_id
assert flow.labels == {"env": "dev"}
async def test_create_flow_populates_and_returned_created(self, client):
current_time = now("UTC")
flow_data = {"name": "my-flow"}
response = await client.post("/flows/", json=flow_data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["name"] == "my-flow"
assert parse_datetime(response.json()["created"]) >= current_time
assert parse_datetime(response.json()["updated"]) >= current_time
async def test_create_flow_gracefully_fallsback(self, client):
"""If the flow already exists, we return a 200 code"""
flow_data = {"name": "my-flow"}
response_1 = await client.post("/flows/", json=flow_data)
assert response_1.status_code == status.HTTP_201_CREATED
assert response_1.json()["name"] == "my-flow"
response_2 = await client.post("/flows/", json=flow_data)
assert response_2.status_code == status.HTTP_200_OK
assert response_2.json()["name"] == "my-flow"
@pytest.mark.parametrize(
"name",
[
"my flow",
"my:flow",
r"my\flow",
"my👍flow",
"my|flow",
],
)
async def test_create_flow_with_nonstandard_characters(self, client, name):
response = await client.post("/flows/", json=dict(name=name))
assert response.status_code == 201
@pytest.mark.parametrize(
"name",
[
"my/flow",
r"my%flow",
],
)
async def test_create_flow_with_invalid_characters_fails(self, client, name):
response = await client.post("/flows/", json=dict(name=name))
assert response.status_code == 422
| TestCreateFlow |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 26517,
"end": 27688
} | class ____(MemoryPlanningLine):
node: BufferLike
is_reused: bool = False
def __post_init__(self):
assert V.graph.scheduler.current_node is not None
self.scheduler_node_index = V.graph.scheduler.nodes.index(
V.graph.scheduler.current_node
)
def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine:
if len(self.node.get_inputs_that_alias_output()) > 0:
return self
if isinstance(self.node.layout, ir.MultiOutputLayout):
return self
assert not self.is_reused
if self.node.get_name() in V.graph.removed_buffers:
return NullLine(self.wrapper)
if config.allow_buffer_reuse:
state.push(buffer_reuse_key(self.node), self)
return self
def codegen(self, code: IndentedBuffer) -> None:
assert self.node.get_name() not in V.graph.removed_buffers
if not self.is_reused:
code.writeline(self.wrapper.make_buffer_free(self.node))
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_free_if_not_reused
@dataclasses.dataclass
| FreeIfNotReusedLine |
python | neetcode-gh__leetcode | python/0079-word-search.py | {
"start": 0,
"end": 1153
} | class ____:
def exist(self, board: List[List[str]], word: str) -> bool:
ROWS, COLS = len(board), len(board[0])
path = set()
def dfs(r, c, i):
if i == len(word):
return True
if (
min(r, c) < 0
or r >= ROWS
or c >= COLS
or word[i] != board[r][c]
or (r, c) in path
):
return False
path.add((r, c))
res = (
dfs(r + 1, c, i + 1)
or dfs(r - 1, c, i + 1)
or dfs(r, c + 1, i + 1)
or dfs(r, c - 1, i + 1)
)
path.remove((r, c))
return res
# To prevent TLE,reverse the word if frequency of the first letter is more than the last letter's
count = sum(map(Counter, board), Counter())
if count[word[0]] > count[word[-1]]:
word = word[::-1]
for r in range(ROWS):
for c in range(COLS):
if dfs(r, c, 0):
return True
return False
# O(n * m * 4^n)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_product_dimension_performance_report.py | {
"start": 1859,
"end": 10958
} | class ____(TestBaseProductDimensionPerformanceReport):
stream_name = "product_dimension_performance_report_daily"
report_file = "product_dimension_performance_report_daily"
incremental_report_file = "product_dimension_performance_report_daily_incremental"
incremental_report_file_with_records_further_cursor = (
"product_dimension_performance_report_daily_incremental_with_records_further_cursor"
)
report_file_with_records_further_start_date = "product_dimension_performance_report_daily_with_records_further_start_date"
records_number = 8
state_file = "product_dimension_performance_report_daily_state"
state_file_legacy = "product_dimension_performance_report_daily_state"
def mock_report_apis(self):
super().mock_report_apis()
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Daily", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Daily", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Daily", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductDimensionPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductDimensionPerformanceReportRequest", "Aggregation": "Daily", "Columns": ["TimePeriod", "AccountName", "AccountNumber", "AdGroupName", "AdGroupId", "CampaignStatus", "AccountStatus", "AdGroupStatus", "Network", "AdId", "CampaignId", "CampaignName", "CurrencyCode", "DeviceType", "Language", "MerchantProductId", "Title", "Condition", "Brand", "Price", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Revenue", "RevenuePerConversion", "SellerName", "OfferLanguage", "CountryOfSale", "AdStatus", "AdDistribution", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "ReturnOnAdSpend", "BidStrategyType", "LocalStoreCode", "StoreId", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "CostPerConversion", "ViewThroughConversions", "Goal", "GoalType", "ProductBought", "QuantityBought", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "ViewThroughConversionsQualified", "ProductBoughtTitle", "GTIN", "MPN", "ViewThroughRevenue", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall", "CampaignType", "AssetGroupId", "AssetGroupName", "AssetGroupStatus", "CustomLabel0", "CustomLabel1", "CustomLabel2", "CustomLabel3", "CustomLabel4", "ProductType1", "ProductType2", "ProductType3", "ProductType4", "ProductType5"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
| TestProductDimensionPerformanceReportDailyStream |
python | doocs__leetcode | lcof/面试题14- II. 剪绳子 II/Solution.py | {
"start": 0,
"end": 310
} | class ____:
def cuttingRope(self, n: int) -> int:
mod = 10**9 + 7
if n < 4:
return n - 1
if n % 3 == 0:
return pow(3, n // 3, mod)
if n % 3 == 1:
return (pow(3, n // 3 - 1, mod) * 4) % mod
return pow(3, n // 3, mod) * 2 % mod
| Solution |
python | pydantic__pydantic | pydantic/plugin/_schema_validator.py | {
"start": 1581,
"end": 5267
} | class ____:
"""Pluggable schema validator."""
__slots__ = '_schema_validator', 'validate_json', 'validate_python', 'validate_strings'
def __init__(
self,
schema: CoreSchema,
schema_type: Any,
schema_type_path: SchemaTypePath,
schema_kind: SchemaKind,
config: CoreConfig | None,
plugins: Iterable[PydanticPluginProtocol],
plugin_settings: dict[str, Any],
) -> None:
self._schema_validator = SchemaValidator(schema, config)
python_event_handlers: list[BaseValidateHandlerProtocol] = []
json_event_handlers: list[BaseValidateHandlerProtocol] = []
strings_event_handlers: list[BaseValidateHandlerProtocol] = []
for plugin in plugins:
try:
p, j, s = plugin.new_schema_validator(
schema, schema_type, schema_type_path, schema_kind, config, plugin_settings
)
except TypeError as e: # pragma: no cover
raise TypeError(f'Error using plugin `{plugin.__module__}:{plugin.__class__.__name__}`: {e}') from e
if p is not None:
python_event_handlers.append(p)
if j is not None:
json_event_handlers.append(j)
if s is not None:
strings_event_handlers.append(s)
self.validate_python = build_wrapper(self._schema_validator.validate_python, python_event_handlers)
self.validate_json = build_wrapper(self._schema_validator.validate_json, json_event_handlers)
self.validate_strings = build_wrapper(self._schema_validator.validate_strings, strings_event_handlers)
def __getattr__(self, name: str) -> Any:
return getattr(self._schema_validator, name)
def build_wrapper(func: Callable[P, R], event_handlers: list[BaseValidateHandlerProtocol]) -> Callable[P, R]:
if not event_handlers:
return func
else:
on_enters = tuple(h.on_enter for h in event_handlers if filter_handlers(h, 'on_enter'))
on_successes = tuple(h.on_success for h in event_handlers if filter_handlers(h, 'on_success'))
on_errors = tuple(h.on_error for h in event_handlers if filter_handlers(h, 'on_error'))
on_exceptions = tuple(h.on_exception for h in event_handlers if filter_handlers(h, 'on_exception'))
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
for on_enter_handler in on_enters:
on_enter_handler(*args, **kwargs)
try:
result = func(*args, **kwargs)
except ValidationError as error:
for on_error_handler in on_errors:
on_error_handler(error)
raise
except Exception as exception:
for on_exception_handler in on_exceptions:
on_exception_handler(exception)
raise
else:
for on_success_handler in on_successes:
on_success_handler(result)
return result
return wrapper
def filter_handlers(handler_cls: BaseValidateHandlerProtocol, method_name: str) -> bool:
"""Filter out handler methods which are not implemented by the plugin directly - e.g. are missing
or are inherited from the protocol.
"""
handler = getattr(handler_cls, method_name, None)
if handler is None:
return False
elif handler.__module__ == 'pydantic.plugin':
# this is the original handler, from the protocol due to runtime inheritance
# we don't want to call it
return False
else:
return True
| PluggableSchemaValidator |
python | xlwings__xlwings | xlwings/base_classes.py | {
"start": 14278,
"end": 14827
} | class ____:
# @property
# def api(self):
# raise NotImplementedError()
def delete(self):
raise NotImplementedError()
@property
def name(self):
raise NotImplementedError()
@name.setter
def name(self, value):
raise NotImplementedError()
@property
def refers_to(self):
raise NotImplementedError()
@refers_to.setter
def refers_to(self, value):
raise NotImplementedError()
@property
def refers_to_range(self):
raise NotImplementedError()
| Name |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 64721,
"end": 67247
} | class ____:
@pytest.mark.clear_db
async def test_subflow_logs_are_written_correctly(self, prefect_client):
@flow
def my_subflow():
logger = get_run_logger()
logger.info("Hello smaller world!")
@flow
def my_flow():
logger = get_run_logger()
logger.info("Hello world!")
return my_subflow(return_state=True)
state = my_flow(return_state=True)
flow_run_id = state.state_details.flow_run_id
subflow_run_id = (await state.result()).state_details.flow_run_id
await _wait_for_logs(prefect_client, expected_num_logs=6)
logs = await prefect_client.read_logs()
log_messages = [log.message for log in logs]
assert all([log.task_run_id is None for log in logs])
assert "Hello world!" in log_messages, "Parent log message is present"
assert logs[log_messages.index("Hello world!")].flow_run_id == flow_run_id, (
"Parent log message has correct id"
)
assert "Hello smaller world!" in log_messages, "Child log message is present"
assert (
logs[log_messages.index("Hello smaller world!")].flow_run_id
== subflow_run_id
), "Child log message has correct id"
@pytest.mark.skip(reason="Fails with new engine, passed on old engine")
@pytest.mark.xfail(reason="Weird state sharing between new and old engine tests")
async def test_subflow_logs_are_written_correctly_with_tasks(self, prefect_client):
@task
def a_log_task():
logger = get_run_logger()
logger.info("Task log")
@flow
def my_subflow():
a_log_task()
logger = get_run_logger()
logger.info("Hello smaller world!")
@flow
def my_flow():
logger = get_run_logger()
logger.info("Hello world!")
return my_subflow(return_state=True)
subflow_state = my_flow()
subflow_run_id = subflow_state.state_details.flow_run_id
logs = await prefect_client.read_logs()
log_messages = [log.message for log in logs]
task_run_logs = [log for log in logs if log.task_run_id is not None]
assert all([log.flow_run_id == subflow_run_id for log in task_run_logs])
assert "Hello smaller world!" in log_messages
assert (
logs[log_messages.index("Hello smaller world!")].flow_run_id
== subflow_run_id
)
| TestSubflowRunLogs |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 3907,
"end": 4073
} | class ____(models.Model):
question = models.CharField(max_length=200)
history = HistoricalRecords(bases=[SessionsHistoricalModel])
| PollWithHistoricalSessionAttr |
python | coleifer__peewee | peewee.py | {
"start": 90712,
"end": 92987
} | class ____(Node):
def __init__(self, name, table, expressions, unique=False, safe=False,
where=None, using=None):
self._name = name
self._table = Entity(table) if not isinstance(table, Table) else table
self._expressions = expressions
self._where = where
self._unique = unique
self._safe = safe
self._using = using
@Node.copy
def safe(self, _safe=True):
self._safe = _safe
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def using(self, _using=None):
self._using = _using
def __sql__(self, ctx):
statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX '
with ctx.scope_values(subquery=True):
ctx.literal(statement)
if self._safe:
ctx.literal('IF NOT EXISTS ')
# Sqlite uses CREATE INDEX <schema>.<name> ON <table>, whereas most
# others use: CREATE INDEX <name> ON <schema>.<table>.
if ctx.state.index_schema_prefix and \
isinstance(self._table, Table) and self._table._schema:
index_name = Entity(self._table._schema, self._name)
table_name = Entity(self._table.__name__)
else:
index_name = Entity(self._name)
table_name = self._table
ctx.sql(index_name)
if self._using is not None and \
ctx.state.index_using_precedes_table:
ctx.literal(' USING %s' % self._using) # MySQL style.
(ctx
.literal(' ON ')
.sql(table_name)
.literal(' '))
if self._using is not None and not \
ctx.state.index_using_precedes_table:
ctx.literal('USING %s ' % self._using) # Postgres/default.
ctx.sql(EnclosedNodeList([
SQL(expr) if isinstance(expr, basestring) else expr
for expr in self._expressions]))
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
return ctx
| Index |
python | ray-project__ray | python/ray/tune/tests/test_progress_reporter.py | {
"start": 14713,
"end": 35772
} | class ____(unittest.TestCase):
def setUp(self) -> None:
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "auto"
os.environ["RAY_AIR_NEW_OUTPUT"] = "0"
def mock_trial(self, status, i):
mock = MagicMock()
mock.status = status
mock.trial_id = "%05d" % i
return mock
def testFairFilterTrials(self):
"""Tests that trials are represented fairly."""
trials_by_state = collections.defaultdict(list)
# States for which trials are under and overrepresented
states_under = (Trial.PAUSED, Trial.ERROR)
states_over = (Trial.PENDING, Trial.RUNNING, Trial.TERMINATED)
max_trials = 13
num_trials_under = 2 # num of trials for each underrepresented state
num_trials_over = 10 # num of trials for each overrepresented state
i = 0
for state in states_under:
for _ in range(num_trials_under):
trials_by_state[state].append(self.mock_trial(state, i))
i += 1
for state in states_over:
for _ in range(num_trials_over):
trials_by_state[state].append(self.mock_trial(state, i))
i += 1
filtered_trials_by_state = _fair_filter_trials(
trials_by_state, max_trials=max_trials
)
for state in trials_by_state:
if state in states_under:
expected_num_trials = num_trials_under
else:
expected_num_trials = (
max_trials - num_trials_under * len(states_under)
) / len(states_over)
state_trials = filtered_trials_by_state[state]
self.assertEqual(len(state_trials), expected_num_trials)
# Make sure trials are sorted newest-first within state.
for i in range(len(state_trials) - 1):
assert state_trials[i].trial_id < state_trials[i + 1].trial_id
def testAddMetricColumn(self):
"""Tests edge cases of add_metric_column."""
# Test list-initialized metric columns.
reporter = CLIReporter(metric_columns=["foo", "bar"])
with self.assertRaises(ValueError):
reporter.add_metric_column("bar")
with self.assertRaises(ValueError):
reporter.add_metric_column("baz", "qux")
reporter.add_metric_column("baz")
self.assertIn("baz", reporter._metric_columns)
# Test default-initialized (dict) metric columns.
reporter = CLIReporter()
reporter.add_metric_column("foo", "bar")
self.assertIn("foo", reporter._metric_columns)
def testInfer(self):
reporter = CLIReporter()
test_result = dict(foo_result=1, baz_result=4123, bar_result="testme")
def test(config):
for i in range(3):
tune.report(test_result)
analysis = tune.run(test, num_samples=3, verbose=3)
all_trials = analysis.trials
inferred_results = reporter._infer_user_metrics(all_trials)
for metric in inferred_results:
self.assertNotIn(metric, AUTO_RESULT_KEYS)
self.assertTrue(metric in test_result)
class TestReporter(CLIReporter):
_output = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._max_report_freqency = 0
def report(self, *args, **kwargs):
progress_str = self._progress_str(*args, **kwargs)
self._output.append(progress_str)
reporter = TestReporter()
analysis = tune.run(test, num_samples=3, progress_reporter=reporter, verbose=3)
found = {k: False for k in test_result}
for output in reporter._output:
for key in test_result:
if key in output:
found[key] = True
assert found["foo_result"]
assert found["baz_result"]
assert not found["bar_result"]
def testProgressStr(self):
trials = []
for i in range(5):
t = Mock()
if i == 0:
t.status = "TERMINATED"
elif i == 1:
t.status = "PENDING"
else:
t.status = "RUNNING"
t.trial_id = "%05d" % i
t.local_experiment_path = "/foo"
t.temporary_state = Mock()
t.temporary_state.location = "here"
t.config = {"a": i, "b": i * 2, "n": {"k": [i, 2 * i]}}
t.evaluated_params = {"a": i, "b": i * 2, "n/k/0": i, "n/k/1": 2 * i}
t.last_result = {
"config": {"a": i, "b": i * 2, "n": {"k": [i, 2 * i]}},
"metric_1": i / 2,
"metric_2": i / 4,
"nested": {"sub": i / 2},
}
t.__str__ = lambda self: self.trial_id
trials.append(t)
# One metric, two parameters
prog1 = _trial_progress_str(
trials, ["metric_1"], ["a", "b"], fmt="psql", max_rows=3, force_table=True
)
print(prog1)
assert prog1 == EXPECTED_RESULT_1
# No metric, all parameters
prog2 = _trial_progress_str(
trials, [], None, fmt="psql", max_rows=None, force_table=True
)
print(prog2)
assert prog2 == EXPECTED_RESULT_2
# Two metrics, one parameter, all with custom representation
prog3 = _trial_progress_str(
trials,
{"nested/sub": "NestSub", "metric_2": "Metric 2"},
{"a": "A"},
fmt="psql",
max_rows=3,
force_table=True,
)
print(prog3)
assert prog3 == EXPECTED_RESULT_3
# Current best trial
best1 = _best_trial_str(trials[1], "metric_1")
assert best1 == EXPECTED_BEST_1
def testBestTrialStr(self):
    """Assert that custom nested parameter columns are printed correctly"""
    conf = {"nested": {"conf": "nested_value"}, "toplevel": "toplevel_value"}
    best = Trial("", config=conf, stub=True)
    best.run_metadata.last_result = {
        "metric": 1,
        "config": conf,
        "nested": {"metric": 2},
    }
    # Default parameter columns should still surface the nested value.
    self.assertIn("nested_value", _best_trial_str(best, "metric"))
    # Explicit nested parameter column.
    self.assertIn(
        "nested_value",
        _best_trial_str(best, "metric", parameter_columns=["nested/conf"]),
    )
    # A nested metric path works as well.
    self.assertIn(
        "nested_value",
        _best_trial_str(best, "nested/metric", parameter_columns=["nested/conf"]),
    )
def testBestTrialZero(self):
    """A falsy metric value (0) must still win under mode="min"."""
    trials = []
    for value in (7, 0, 2):
        trial = Trial("", config={}, stub=True)
        trial.run_metadata.last_result = {"metric": value, "config": {}}
        trials.append(trial)
    reporter = TuneReporterBase(metric="metric", mode="min")
    best_trial, metric = reporter._current_best_trial(trials)
    # The trial with metric == 0 is the minimum.
    assert best_trial == trials[1]
def testBestTrialNan(self):
    """NaN metrics must be skipped when selecting the current best trial."""

    def make_trials():
        # Fresh trials per reporter: metrics are NaN, 0, and 2.
        built = []
        for value in (np.nan, 0, 2):
            trial = Trial("", config={}, stub=True)
            trial.run_metadata.last_result = {"metric": value, "config": {}}
            built.append(trial)
        return built

    # mode="min": the NaN trial is ignored, 0 wins.
    trials = make_trials()
    reporter = TuneReporterBase(metric="metric", mode="min")
    best_trial, metric = reporter._current_best_trial(trials)
    assert best_trial == trials[1]

    # mode="max": the NaN trial is ignored, 2 wins.
    trials = make_trials()
    reporter = TuneReporterBase(metric="metric", mode="max")
    best_trial, metric = reporter._current_best_trial(trials)
    assert best_trial == trials[2]
def testTimeElapsed(self):
    """Verify the elapsed-time string for sub-day and multi-day runs."""
    # Sun Feb 7 14:18:40 2016 -0800 (time of the first Ray commit).
    start = 1454825920
    # 1 hour, 31 minutes, 22 seconds: time to the second commit.
    elapsed = 1 * 60 * 60 + 31 * 60 + 22
    now = start + elapsed
    # Local timezone output can be tricky, so we don't check the
    # day and the hour in this test.
    output = _time_passed_str(start, now)
    self.assertIn("Current time: 2016-02-", output)
    self.assertIn(":50:02 (running for 01:31:22.00)", output)
    # Two days later: the "N days" prefix should appear.
    output = _time_passed_str(start, now + 2 * 60 * 60 * 24)
    self.assertIn("Current time: 2016-02-", output)
    self.assertIn(":50:02 (running for 2 days, 01:31:22.00)", output)
def testCurrentBestTrial(self):
    """The reporter output should contain the current-best-trial line."""
    mock_trials = []
    for idx in range(5):
        trial = Mock()
        trial.status = "RUNNING"
        trial.trial_id = "%05d" % idx
        trial.local_experiment_path = "/foo"
        trial.temporary_state = Mock()
        trial.temporary_state.location = "here"
        trial.config = {"a": idx, "b": idx * 2, "n": {"k": [idx, 2 * idx]}}
        trial.evaluated_params = {"a": idx}
        trial.last_result = {"config": {"a": idx}, "metric_1": idx / 2}
        trial.__str__ = lambda self: self.trial_id
        mock_trials.append(trial)

    class RecordingReporter(CLIReporter):
        _output = []

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Report on every call; attribute name (with its typo) matches
            # the one CLIReporter reads internally.
            self._max_report_freqency = 0

        def report(self, *args, **kwargs):
            self._output.append(self._progress_str(*args, **kwargs))

    reporter = RecordingReporter(mode="max")
    reporter.report(mock_trials, done=False)
    assert EXPECTED_BEST_2 in reporter._output[0]
def testSortByMetric(self):
    """Check progress-table row ordering with ``sort_by_metric``.

    Sorting should only take effect when both a metric and a mode are
    known to the reporter — either at construction time or later via
    ``set_search_properties`` — otherwise the original order is kept.
    """
    trials = []
    for i in range(5):
        t = Mock()
        # Three terminated trials (these receive metric values below),
        # one pending and one running trial.
        if i < 3:
            t.status = "TERMINATED"
        elif i == 3:
            t.status = "PENDING"
        else:
            t.status = "RUNNING"
        t.trial_id = "%05d" % i
        t.local_experiment_path = "/foo"
        t.temporary_state = Mock()
        t.temporary_state.location = "here"
        t.run_metadata = Mock()
        t.config = {"a": i}
        t.evaluated_params = {"a": i}
        t.last_result = {"config": {"a": i}}
        t.__str__ = lambda self: self.trial_id
        trials.append(t)
    # Set `metric_1` (and a nested `metric_2`) for terminated trials only.
    trials[0].last_result["metric_1"] = 0.3
    trials[0].last_result["nested"] = {"metric_2": 0.3}
    trials[1].last_result["metric_1"] = 0.2
    trials[1].last_result["nested"] = {"metric_2": 0.2}
    trials[2].last_result["metric_1"] = 0.4
    trials[2].last_result["nested"] = {"metric_2": 0.4}

    class TestReporter(CLIReporter):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Report on every call; attribute name (with its typo) matches
            # the one CLIReporter reads internally.
            self._max_report_freqency = 0
            self._output = ""

        def report(self, *args, **kwargs):
            progress_str = self._progress_str(*args, **kwargs)
            self._output = progress_str

    # Default reporter: sorting not requested.
    reporter1 = TestReporter(max_progress_rows=4, mode="max", metric="metric_1")
    reporter1.report(trials, done=False)
    assert EXPECTED_SORT_RESULT_UNSORTED in reporter1._output
    # Sort by metric (asc).
    reporter2 = TestReporter(
        max_progress_rows=4, mode="min", metric="metric_1", sort_by_metric=True
    )
    reporter2.report(trials, done=False)
    assert EXPECTED_SORT_RESULT_ASC in reporter2._output
    # Sort by metric (desc).
    reporter3 = TestReporter(
        max_progress_rows=4, mode="max", metric="metric_1", sort_by_metric=True
    )
    reporter3.report(trials, done=False)
    assert EXPECTED_SORT_RESULT_DESC in reporter3._output
    # No sorting when mode is None.
    reporter4 = TestReporter(
        max_progress_rows=4, metric="metric_1", sort_by_metric=True
    )
    reporter4.report(trials, done=False)
    assert EXPECTED_SORT_RESULT_UNSORTED in reporter4._output
    # No sorting when metric is None.
    reporter5 = TestReporter(max_progress_rows=4, mode="max", sort_by_metric=True)
    reporter5.report(trials, done=False)
    assert EXPECTED_SORT_RESULT_UNSORTED in reporter5._output
    # Sorting also works when metric/mode arrive later via
    # reporter.set_search_properties (as called from tune.run).
    reporter6 = TestReporter(max_progress_rows=4, sort_by_metric=True)
    reporter6.set_search_properties(metric="metric_1", mode="max")
    reporter6.report(trials, done=False)
    assert EXPECTED_SORT_RESULT_DESC in reporter6._output
    # Sort by a nested metric (asc).
    reporter7 = TestReporter(
        max_progress_rows=4,
        mode="min",
        metric="nested/metric_2",
        sort_by_metric=True,
        metric_columns=["nested/metric_2"],
    )
    reporter7.report(trials, done=False)
    assert EXPECTED_NESTED_SORT_RESULT in reporter7._output
def testEndToEndReporting(self):
    """Run a small tune job in a subprocess driver and check its CLI output.

    The trial UUID is pinned via ``_TEST_TUNE_TRIAL_UUID`` so that the
    expected output strings are deterministic.
    """
    try:
        os.environ["_TEST_TUNE_TRIAL_UUID"] = "xxxxx"
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "100"
        output = run_string_as_driver(END_TO_END_COMMAND)
        try:
            # New execution path is too fast, trials are already terminated
            # by the time the first status table would show them running.
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                assert EXPECTED_END_TO_END_START in output
            assert EXPECTED_END_TO_END_END in output
            # Raylet lines other than the cluster-ID banner are unexpected.
            for line in output.splitlines():
                if "(raylet)" in line:
                    assert "cluster ID" in line, "Unexpected raylet log messages"
        except Exception:
            # Dump the full driver output to ease debugging, then re-raise.
            print("*** BEGIN OUTPUT ***")
            print(output)
            print("*** END OUTPUT ***")
            raise
    finally:
        del os.environ["_TEST_TUNE_TRIAL_UUID"]
def testVerboseReporting(self):
    """Check which output lines appear at each ``verbose`` level (0-3).

    verbose=0: silent; verbose=1: experiment-level output only;
    verbose=2: condensed per-trial results; verbose=3: detailed
    per-trial results. The trial UUID is pinned so the expected output
    strings are deterministic.
    """
    try:
        os.environ["_TEST_TUNE_TRIAL_UUID"] = "xxxxx"

        # verbose=0: nothing should be printed.
        verbose_0_cmd = VERBOSE_CMD + "verbose=0)"
        output = run_string_as_driver(verbose_0_cmd)
        try:
            self.assertNotIn(VERBOSE_EXP_OUT_1, output)
            self.assertNotIn(VERBOSE_EXP_OUT_2, output)
            self.assertNotIn(VERBOSE_TRIAL_NORM_1, output)
            self.assertIsNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output))
            self.assertNotIn(VERBOSE_TRIAL_NORM_3, output)
            self.assertNotIn(VERBOSE_TRIAL_NORM_4, output)
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                self.assertNotIn(VERBOSE_TRIAL_DETAIL, output)
        except Exception:
            # Dump the driver output to ease debugging, then re-raise.
            print("*** BEGIN OUTPUT ***")
            print(output)
            print("*** END OUTPUT ***")
            raise

        # verbose=1: experiment-level output only, no per-trial lines.
        verbose_1_cmd = VERBOSE_CMD + "verbose=1)"
        output = run_string_as_driver(verbose_1_cmd)
        try:
            # New execution path is too fast, trials are already terminated
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                self.assertIn(VERBOSE_EXP_OUT_1, output)
                self.assertIn(VERBOSE_EXP_OUT_2, output)
            self.assertNotIn(VERBOSE_TRIAL_NORM_1, output)
            self.assertIsNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output))
            self.assertNotIn(VERBOSE_TRIAL_NORM_3, output)
            self.assertNotIn(VERBOSE_TRIAL_NORM_4, output)
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                self.assertNotIn(VERBOSE_TRIAL_DETAIL, output)
        except Exception:
            print("*** BEGIN OUTPUT ***")
            print(output)
            print("*** END OUTPUT ***")
            raise

        # verbose=2: experiment output plus condensed trial results.
        verbose_2_cmd = VERBOSE_CMD + "verbose=2)"
        output = run_string_as_driver(verbose_2_cmd)
        try:
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                self.assertIn(VERBOSE_EXP_OUT_1, output)
                self.assertIn(VERBOSE_EXP_OUT_2, output)
            self.assertIn(VERBOSE_TRIAL_NORM_1, output)
            self.assertIsNotNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output))
            self.assertIn(VERBOSE_TRIAL_NORM_3, output)
            self.assertIn(VERBOSE_TRIAL_NORM_4, output)
            self.assertNotIn(VERBOSE_TRIAL_DETAIL, output)
        except Exception:
            print("*** BEGIN OUTPUT ***")
            print(output)
            print("*** END OUTPUT ***")
            raise

        # verbose=3: detailed trial output instead of the condensed form.
        verbose_3_cmd = VERBOSE_CMD + "verbose=3)"
        output = run_string_as_driver(verbose_3_cmd)
        try:
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                self.assertIn(VERBOSE_EXP_OUT_1, output)
                self.assertIn(VERBOSE_EXP_OUT_2, output)
            self.assertNotIn(VERBOSE_TRIAL_NORM_1, output)
            self.assertIsNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output))
            self.assertNotIn(VERBOSE_TRIAL_NORM_3, output)
            self.assertNotIn(VERBOSE_TRIAL_NORM_4, output)
            if os.environ.get("TUNE_NEW_EXECUTION") == "0":
                self.assertIn(VERBOSE_TRIAL_DETAIL, output)
            # Check that we don't print duplicate results at the end
            self.assertTrue(output.count(VERBOSE_TRIAL_WITH_ONCE_RESULT) == 1)
            self.assertIn(VERBOSE_TRIAL_WITH_ONCE_COMPLETED, output)
        except Exception:
            print("*** BEGIN OUTPUT ***")
            print(output)
            print("*** END OUTPUT ***")
            raise
    finally:
        del os.environ["_TEST_TUNE_TRIAL_UUID"]
def testReporterDetection(self):
    """Test if correct reporter is returned from ``_detect_reporter()``."""
    # Outside a notebook, the CLI reporter is used.
    reporter = _detect_reporter()
    self.assertIsInstance(reporter, CLIReporter)
    self.assertNotIsInstance(reporter, JupyterNotebookReporter)
    with patch("ray.tune.progress_reporter.IS_NOTEBOOK", True):
        # Inside a notebook, the Jupyter reporter is used instead...
        nb_reporter = _detect_reporter()
        self.assertNotIsInstance(nb_reporter, CLIReporter)
        self.assertIsInstance(nb_reporter, JupyterNotebookReporter)
        # ...unless the trainer API is used, which forces the CLI reporter.
        trainer_reporter = _detect_reporter(_trainer_api=True)
        self.assertNotIsInstance(trainer_reporter, JupyterNotebookReporter)
        self.assertIsInstance(trainer_reporter, CLIReporter)
def testProgressReporterAPI(self):
    """A minimal custom ProgressReporter subclass is usable with tune.run."""

    class NoopReporter(ProgressReporter):
        def should_report(self, trials, done=False):
            # Always ask to report.
            return True

        def report(self, trials, done, *sys_info):
            # Intentionally produce no output.
            pass

    tune.run(
        lambda config: 2,
        num_samples=1,
        progress_reporter=NoopReporter(),
        verbose=3,
    )
def testMaxLen(self):
    """Overlong parameter/metric strings should be truncated in the table."""
    long_key = "verylong" * 20  # 160-character parameter name
    mock_trials = []
    for idx in range(5):
        trial = Mock()
        trial.status = "TERMINATED"
        trial.trial_id = "%05d" % idx
        trial.local_experiment_path = "/foo"
        trial.temporary_state = Mock()
        trial.temporary_state.location = "here"
        trial.config = {long_key: idx}
        trial.evaluated_params = {long_key: idx}
        trial.last_result = {"some_metric": "evenlonger" * 100}
        trial.__str__ = lambda self: self.trial_id
        mock_trials.append(trial)
    progress_str = _trial_progress_str(
        mock_trials, metric_columns=["some_metric"], force_table=True
    )
    # NOTE(review): ``any`` only checks that *some* row fits in 90 chars
    # (trivially satisfied by short rows); presumably ``all`` was intended
    # — confirm before tightening.
    assert any(len(row) <= 90 for row in progress_str.split("\n"))
def test_max_len():
    """_max_len truncates from the left and optionally wraps long strings."""
    # Fits exactly within max_len: returned unchanged.
    assert (
        _max_len("some_long_string/even_longer", max_len=28)
        == "some_long_string/even_longer"
    )
    # Too long: truncated from the left with a "..." prefix.
    assert _max_len("some_long_string/even_longer", max_len=15) == ".../even_longer"
    # Wrapping splits the truncated string into max_len-sized lines.
    wrapped = _max_len(
        "19_character_string/19_character_string/too_long", max_len=20, wrap=True
    )
    assert wrapped == "...r_string/19_chara\ncter_string/too_long"
if __name__ == "__main__":
    # Run this module's tests directly via pytest, propagating its exit code.
    raise SystemExit(pytest.main(["-v", __file__]))
| ProgressReporterTest |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/external_user_details.py | {
"start": 1215,
"end": 3829
} | class ____(OrganizationEndpoint, ExternalActorEndpointMixin):
owner = ApiOwner.ECOSYSTEM
permission_classes = (ExternalUserPermission,)
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
def convert_args(
self,
request: Request,
organization_id_or_slug: int | str,
external_user_id: int,
*args: Any,
**kwargs: Any,
) -> tuple[tuple[Any, ...], dict[str, Any]]:
args, kwargs = super().convert_args(request, organization_id_or_slug, *args, **kwargs)
kwargs["external_user"] = self.get_external_actor_or_404(
external_user_id, kwargs["organization"]
)
return args, kwargs
@extend_schema(
operation_id="Update an External User",
parameters=[GlobalParams.ORG_ID_OR_SLUG, OrganizationParams.EXTERNAL_USER_ID],
request=ExternalUserSerializer,
responses={
200: ExternalActorSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
examples=IntegrationExamples.EXTERNAL_USER_CREATE,
)
def put(
self, request: Request, organization: Organization, external_user: ExternalActor
) -> Response:
"""
Update a user in an external provider that is currently linked to a Sentry user.
"""
self.assert_has_feature(request, organization)
serializer = ExternalUserSerializer(
instance=external_user,
data=request.data,
context={"organization": organization},
partial=True,
)
if serializer.is_valid():
updated_external_user = serializer.save()
return Response(
serialize(updated_external_user, request.user), status=status.HTTP_200_OK
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@extend_schema(
operation_id="Delete an External User",
parameters=[GlobalParams.ORG_ID_OR_SLUG, OrganizationParams.EXTERNAL_USER_ID],
request=None,
responses={
204: RESPONSE_NO_CONTENT,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
)
def delete(
self, request: Request, organization: Organization, external_user: ExternalActor
) -> Response:
"""
Delete the link between a user from an external provider and a Sentry user.
"""
external_user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| ExternalUserDetailsEndpoint |
python | pytorch__pytorch | test/torch_np/test_reductions.py | {
"start": 5237,
"end": 10667
} | class ____(TestCase):
def test_sum(self):
m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
am = np.asarray(m)
assert_equal(np.sum(m), am.sum())
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
zero = np.zeros(1, dtype="float32")[0]
assert_allclose((a / 10.0).sum() - a.size / 10.0, zero, atol=1.5e-4)
a = np.ones(500, dtype=np.float64)
assert_allclose((a / 10.0).sum() - a.size / 10.0, 0.0, atol=1.5e-13)
def test_sum_boolean(self):
a = np.arange(7) % 2 == 0
res = a.sum()
assert_equal(res, 4)
res_float = a.sum(dtype=np.float64)
assert_allclose(res_float, 4.0, atol=1e-15)
assert res_float.dtype == "float64"
@skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@xpassIfTorchDynamo_np # (reason="sum: does not warn on overflow")
def test_sum_dtypes_warnings(self):
for dt in (int, np.float16, np.float32, np.float64):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235):
# warning if sum overflows, which it does in float16
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", RuntimeWarning)
tgt = dt(v * (v + 1) / 2)
overflow = not np.isfinite(tgt)
assert_equal(len(w), 1 * overflow)
d = np.arange(1, v + 1, dtype=dt)
assert_almost_equal(np.sum(d), tgt)
assert_equal(len(w), 2 * overflow)
assert_almost_equal(np.sum(np.flip(d)), tgt)
assert_equal(len(w), 3 * overflow)
def test_sum_dtypes_2(self):
for dt in (int, np.float16, np.float32, np.float64):
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.0)
assert_almost_equal(np.sum(d[1::2]), 250.0)
assert_almost_equal(np.sum(d[::3]), 167.0)
assert_almost_equal(np.sum(d[1::3]), 167.0)
assert_almost_equal(np.sum(np.flip(d)[::2]), 250.0)
assert_almost_equal(np.sum(np.flip(d)[1::2]), 250.0)
assert_almost_equal(np.sum(np.flip(d)[::3]), 167.0)
assert_almost_equal(np.sum(np.flip(d)[1::3]), 167.0)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.0)
@parametrize("dt", [np.complex64, np.complex128])
def test_sum_complex_1(self, dt):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_allclose(np.sum(d), tgt, atol=1.5e-5)
assert_allclose(np.sum(np.flip(d)), tgt, atol=1.5e-7)
@parametrize("dt", [np.complex64, np.complex128])
def test_sum_complex_2(self, dt):
d = np.ones(500, dtype=dt) + 1j
assert_allclose(np.sum(d[::2]), 250.0 + 250j, atol=1.5e-7)
assert_allclose(np.sum(d[1::2]), 250.0 + 250j, atol=1.5e-7)
assert_allclose(np.sum(d[::3]), 167.0 + 167j, atol=1.5e-7)
assert_allclose(np.sum(d[1::3]), 167.0 + 167j, atol=1.5e-7)
assert_allclose(np.sum(np.flip(d)[::2]), 250.0 + 250j, atol=1.5e-7)
assert_allclose(np.sum(np.flip(d)[1::2]), 250.0 + 250j, atol=1.5e-7)
assert_allclose(np.sum(np.flip(d)[::3]), 167.0 + 167j, atol=1.5e-7)
assert_allclose(np.sum(np.flip(d)[1::3]), 167.0 + 167j, atol=1.5e-7)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_allclose(d, 2.0 + 2j, atol=1.5e-7)
@xpassIfTorchDynamo_np # (reason="initial=... need implementing")
def test_sum_initial(self):
# Integer, single axis
assert_equal(np.sum([3], initial=2), 5)
# Floating point
assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
# Multiple non-adjacent axes
assert_equal(
np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
[12, 12, 12],
)
@xpassIfTorchDynamo_np # (reason="where=... need implementing")
def test_sum_where(self):
# More extensive tests done in test_reduction_with_where.
assert_equal(np.sum([[1.0, 2.0], [3.0, 4.0]], where=[True, False]), 4.0)
assert_equal(
np.sum([[1.0, 2.0], [3.0, 4.0]], axis=0, initial=5.0, where=[True, False]),
[9.0, 5.0],
)
parametrize_axis = parametrize(
"axis", [0, 1, 2, -1, -2, (0, 1), (1, 0), (0, 1, 2), (1, -1, 0)]
)
parametrize_func = parametrize(
"func",
[
np.any,
np.all,
np.argmin,
np.argmax,
np.min,
np.max,
np.mean,
np.sum,
np.prod,
np.std,
np.var,
np.count_nonzero,
],
)
fails_axes_tuples = {
np.any,
np.all,
np.argmin,
np.argmax,
np.prod,
}
fails_out_arg = {
np.count_nonzero,
}
restricts_dtype_casts = {np.var, np.std}
fails_empty_tuple = {np.argmin, np.argmax}
@instantiate_parametrized_tests
| TestSum |
python | Textualize__textual | tests/test_lazy.py | {
"start": 169,
"end": 785
} | class ____(App):
def compose(self) -> ComposeResult:
with Vertical():
with Lazy(Horizontal()):
yield Label(id="foo")
with Horizontal():
yield Label(id="bar")
async def test_lazy():
app = LazyApp()
async with app.run_test() as pilot:
# No #foo on initial mount
assert len(app.query("#foo")) == 0
assert len(app.query("#bar")) == 1
await pilot.pause()
await pilot.pause()
# #bar mounted after refresh
assert len(app.query("#foo")) == 1
assert len(app.query("#bar")) == 1
| LazyApp |
python | pydata__xarray | asv_bench/benchmarks/indexing.py | {
"start": 5081,
"end": 5252
} | class ____(Indexing):
def setup(self, key):
requires_dask()
super().setup(key)
self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50})
| IndexingDask |
python | pytorch__pytorch | torch/_dynamo/mutation_guard.py | {
"start": 2291,
"end": 5166
} | class ____:
generation: int = 0
dynamic_classes: ExactWeakKeyDictionary = ExactWeakKeyDictionary()
generation_values: ExactWeakKeyDictionary = ExactWeakKeyDictionary()
@classmethod
def tag(cls, obj: Any) -> None:
cls.generation_values[obj] = cls.generation
@staticmethod
def mark_class_dynamic(cls: type[torch.nn.Module]) -> None:
assert issubclass(cls, torch.nn.Module)
GenerationTracker.dynamic_classes[cls] = True
@classmethod
def get_generation_value(cls, obj: Any) -> int:
if obj not in cls.generation_values:
return -1
return cls.generation_values[obj]
@classmethod
def check(cls, obj: Any) -> bool:
return (
obj in cls.generation_values
and cls.generation_values[obj] == cls.generation
)
@classmethod
def clear(cls) -> None:
cls.generation = 0
cls.dynamic_classes = ExactWeakKeyDictionary()
cls.generation_values = ExactWeakKeyDictionary()
def is_dynamic_nn_module(obj: Any, is_export: bool) -> bool:
"""Check for nn.Modules() created dynamically or mutated"""
if isinstance(obj, torch.nn.Module) and (
"forward" in obj.__dict__ or isinstance(obj, (dict, MutableMapping))
):
# A monkey patched `.forward` indicates something wacky is going on
# Similarly a nn module also subclassed as a dict is unusual.
return True
if hasattr(obj, "torchdynamo_force_dynamic"):
return obj.torchdynamo_force_dynamic
if (
isinstance(obj, torch.nn.Module)
and config.inline_inbuilt_nn_modules
and (not is_export or config.install_free_tensors)
):
return True
if isinstance(obj, torch.nn.Module) and nn_module_has_global_hooks():
return True
dyn = GenerationTracker.dynamic_classes.get(type(obj)) or GenerationTracker.check(
obj
)
return dyn
def install_generation_tagging_init() -> None:
"""
Monkey patch torch.nn.Module.__init__ and torch.nn.Module.__setstate__
so we can detect nn.Module instances created dynamically inside forward methods.
"""
if getattr(Module, "___needs_generation_tag_patch", True):
init = Module.__init__
def patched_init(self: Module, *args: Any, **kwargs: Any) -> None:
init(self, *args, **kwargs)
GenerationTracker.tag(self)
Module.__init__ = patched_init # type: ignore[method-assign]
setstate = Module.__setstate__
def patched_setstate(self: Module, state: Any) -> None:
setstate(self, state)
GenerationTracker.tag(self)
Module.__setstate__ = patched_setstate # type: ignore[method-assign]
Module.___needs_generation_tag_patch = False # type: ignore[attr-defined]
GenerationTracker.generation += 1
| GenerationTracker |
python | ray-project__ray | python/ray/data/examples/data/video_processing/video_processor.py | {
"start": 1484,
"end": 1696
} | class ____(BaseModel):
"""Lightweight sampling configuration for ``VideoProcessor``."""
fps: Optional[float] = None
num_frames: Optional[int] = None
class Config:
extra = "forbid"
| Sampling |
python | anthropics__anthropic-sdk-python | src/anthropic/types/citation_web_search_result_location_param.py | {
"start": 264,
"end": 517
} | class ____(TypedDict, total=False):
cited_text: Required[str]
encrypted_index: Required[str]
title: Required[Optional[str]]
type: Required[Literal["web_search_result_location"]]
url: Required[str]
| CitationWebSearchResultLocationParam |
python | langchain-ai__langchain | libs/cli/langchain_cli/utils/packages.py | {
"start": 1070,
"end": 2347
} | class ____(TypedDict):
"""Fields from `pyproject.toml` that are relevant to LangServe.
Attributes:
module: The module to import from, `tool.langserve.export_module`
attr: The attribute to import from the module, `tool.langserve.export_attr`
package_name: The name of the package, `tool.poetry.name`
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
"""Get LangServe export information from a `pyproject.toml` file.
Args:
filepath: Path to the `pyproject.toml` file.
Returns:
The LangServeExport information.
Raises:
KeyError: If the `pyproject.toml` file is missing required fields.
"""
with filepath.open() as f:
# tomlkit types aren't amazing - treat as Dict instead
data = cast("dict[str, Any]", load(f))
try:
module = str(data["tool"]["langserve"]["export_module"])
attr = str(data["tool"]["langserve"]["export_attr"])
package_name = str(data["tool"]["poetry"]["name"])
except KeyError as e:
msg = "Invalid LangServe PyProject.toml"
raise KeyError(msg) from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
| LangServeExport |
python | kamyu104__LeetCode-Solutions | Python/range-sum-of-sorted-subarray-sums.py | {
"start": 1876,
"end": 2507
} | class ____(object):
def rangeSum(self, nums, n, left, right):
"""
:type nums: List[int]
:type n: int
:type left: int
:type right: int
:rtype: int
"""
MOD = 10**9+7
min_heap = []
for i, num in enumerate(nums, 1):
heapq.heappush(min_heap, (num, i))
result = 0
for i in xrange(1, right+1):
total, j = heapq.heappop(min_heap)
if i >= left:
result = (result+total)%MOD
if j+1 <= n:
heapq.heappush(min_heap, (total+nums[j], j+1))
return result
| Solution2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instagram/components.py | {
"start": 8671,
"end": 10295
} | class ____(RecordTransformation):
"""
The transformation flattens a nested array of breakdown results located at total_value.breakdowns[0].results into a single object
(dictionary). In this transformation, each key-value pair in the resulting object represents a dimension and its corresponding value.
Example input:
{
"total_value": {
"breakdowns": [
{
"dimension_keys": [
"city"
],
"results": [
{
"dimension_values": [
"London, England"
],
"value": 263
},
{
"dimension_values": [
"Sydney, New South Wales"
],
"value": 467
}
]
}
]
},
"id": "id/insights/follower_demographics/lifetime"
}
Example output:
{
"value": {
"London, England": 263,
"Sydney, New South Wales": 467,
}
The nested 'results' array is transformed into a 'value' dictionary where each key is a dimension and each value is the corresponding value.
"""
def transform(self, record: MutableMapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
record_total_value = record.pop("total_value")
record["value"] = {res.get("dimension_values", [""])[0]: res.get("value") for res in record_total_value["breakdowns"][0]["results"]}
return record
@dataclass
| InstagramBreakDownResultsTransformation |
python | apache__airflow | providers/google/src/airflow/providers/google/common/hooks/discovery_api.py | {
"start": 1086,
"end": 6482
} | class ____(GoogleBaseHook):
"""
A hook to use the Google API Discovery Service.
:param api_service_name: The name of the api service that is needed to get the data
for example 'youtube'.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
_conn: Resource | None = None
def __init__(
self,
api_service_name: str,
api_version: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_service_name = api_service_name
self.api_version = api_version
def get_conn(self) -> Resource:
"""
Create an authenticated api client for the given api service name and credentials.
:return: the authenticated api service.
"""
self.log.info("Authenticating Google API Client")
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
serviceName=self.api_service_name,
version=self.api_version,
http=http_authorized,
cache_discovery=False,
)
return self._conn
def query(self, endpoint: str, data: dict, paginate: bool = False, num_retries: int = 0) -> dict:
"""
Create a dynamic API call to any Google API registered in Google's API Client Library and queries it.
:param endpoint: The client libraries path to the api call's executing method.
For example: 'analyticsreporting.reports.batchGet'
.. seealso:: https://developers.google.com/apis-explorer
for more information on what methods are available.
:param data: The data (endpoint params) needed for the specific request to given endpoint.
:param paginate: If set to True, it will collect all pages of data.
:param num_retries: Define the number of retries for the requests being made if it fails.
:return: the API response from the passed endpoint.
"""
google_api_conn_client = self.get_conn()
api_response = self._call_api_request(google_api_conn_client, endpoint, data, paginate, num_retries)
return api_response
def _call_api_request(self, google_api_conn_client, endpoint, data, paginate, num_retries):
api_endpoint_parts = endpoint.split(".")
google_api_endpoint_instance = self._build_api_request(
google_api_conn_client, api_sub_functions=api_endpoint_parts[1:], api_endpoint_params=data
)
if paginate:
return self._paginate_api(
google_api_endpoint_instance, google_api_conn_client, api_endpoint_parts, num_retries
)
return google_api_endpoint_instance.execute(num_retries=num_retries)
def _build_api_request(self, google_api_conn_client, api_sub_functions, api_endpoint_params):
for sub_function in api_sub_functions:
google_api_conn_client = getattr(google_api_conn_client, sub_function)
if sub_function != api_sub_functions[-1]:
google_api_conn_client = google_api_conn_client()
else:
google_api_conn_client = google_api_conn_client(**api_endpoint_params)
return google_api_conn_client
def _paginate_api(
self, google_api_endpoint_instance, google_api_conn_client, api_endpoint_parts, num_retries
):
api_responses = []
while google_api_endpoint_instance:
api_response = google_api_endpoint_instance.execute(num_retries=num_retries)
api_responses.append(api_response)
google_api_endpoint_instance = self._build_next_api_request(
google_api_conn_client, api_endpoint_parts[1:], google_api_endpoint_instance, api_response
)
return api_responses
def _build_next_api_request(
self, google_api_conn_client, api_sub_functions, api_endpoint_instance, api_response
):
for sub_function in api_sub_functions:
if sub_function != api_sub_functions[-1]:
google_api_conn_client = getattr(google_api_conn_client, sub_function)
google_api_conn_client = google_api_conn_client()
else:
google_api_conn_client = getattr(google_api_conn_client, sub_function + "_next")
google_api_conn_client = google_api_conn_client(api_endpoint_instance, api_response)
return google_api_conn_client
| GoogleDiscoveryApiHook |
python | PyCQA__pylint | pylint/checkers/classes/class_checker.py | {
"start": 26741,
"end": 27466
} | class ____:
"""Store the accessed variables per scope."""
def __init__(self) -> None:
self._scopes: defaultdict[
nodes.ClassDef, defaultdict[str, list[_AccessNodes]]
] = defaultdict(_scope_default)
def set_accessed(self, node: _AccessNodes) -> None:
"""Set the given node as accessed."""
frame = node_frame_class(node)
if frame is None:
# The node does not live in a class.
return
self._scopes[frame][node.attrname].append(node)
def accessed(self, scope: nodes.ClassDef) -> dict[str, list[_AccessNodes]]:
"""Get the accessed variables for the given scope."""
return self._scopes.get(scope, {})
| ScopeAccessMap |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 1262,
"end": 2780
class ____(models.Model):
    """
    A model class for testing regular flat fields.
    """
    auto_field = models.AutoField(primary_key=True)
    big_integer_field = models.BigIntegerField()
    boolean_field = models.BooleanField(default=False)
    char_field = models.CharField(max_length=100)
    comma_separated_integer_field = models.CommaSeparatedIntegerField(max_length=100)
    date_field = models.DateField()
    datetime_field = models.DateTimeField()
    decimal_field = models.DecimalField(max_digits=3, decimal_places=1)
    email_field = models.EmailField(max_length=100)
    float_field = models.FloatField()
    integer_field = models.IntegerField()
    null_boolean_field = models.BooleanField(null=True, default=False)
    positive_integer_field = models.PositiveIntegerField()
    positive_small_integer_field = models.PositiveSmallIntegerField()
    slug_field = models.SlugField(max_length=100)
    small_integer_field = models.SmallIntegerField()
    text_field = models.TextField(max_length=100)
    file_field = models.FileField(max_length=100)
    time_field = models.TimeField()
    url_field = models.URLField(max_length=100)
    # User-defined field type declared elsewhere in this test module.
    custom_field = CustomField()
    file_path_field = models.FilePathField(path=tempfile.gettempdir())
    def method(self):
        # Plain (non-field) attribute so tests can cover non-field names.
        return 'method'
    # Candidate `choices` tuples; note they are not attached to any field above.
    COLOR_CHOICES = (('red', 'Red'), ('blue', 'Blue'), ('green', 'Green'))
    DECIMAL_CHOICES = (('low', decimal.Decimal('0.1')), ('medium', decimal.Decimal('0.5')), ('high', decimal.Decimal('0.9')))
| RegularFieldsModel |
python | doocs__leetcode | solution/1500-1599/1524.Number of Sub-arrays With Odd Sum/Solution.py | {
"start": 0,
"end": 269
class ____:
    def numOfSubarrays(self, arr: List[int]) -> int:
        """Count subarrays of ``arr`` with an odd sum, modulo 1e9+7.

        A subarray arr[i:j] has an odd sum iff the prefix sums P[i] and P[j]
        have different parity, so it suffices to count parity pairs.
        """
        MOD = 10**9 + 7
        even = 1  # even-parity prefix sums seen so far (incl. empty prefix)
        odd = 0  # odd-parity prefix sums seen so far
        running = 0
        total = 0
        for value in arr:
            running += value
            if running % 2:
                total = (total + even) % MOD
                odd += 1
            else:
                total = (total + odd) % MOD
                even += 1
        return total
| Solution |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/optimization/test_manual_optimization.py | {
"start": 10311,
"end": 20030
class ____(collections.abc.Mapping):
    """A custom implementation of Mapping for testing purposes."""

    def __init__(self, *args, **kwargs):
        # Delegate all storage to a plain dict built from the same arguments.
        self._store = dict(*args, **kwargs)

    def __getitem__(self, key):
        return self._store[key]

    def __len__(self):
        return len(self._store)

    def __iter__(self):
        return iter(self._store)

    def __repr__(self):
        return "{}({})".format(type(self).__name__, self._store)

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        klass = type(self)
        return klass(dict(self._store))
@RunIf(min_cuda_gpus=1)
@pytest.mark.parametrize("dicttype", [dict, CustomMapping])
def test_multiple_optimizers_step(tmp_path, dicttype):
    """Tests that `step` works with several optimizers."""
    class TestModel(ManualOptModel):
        def training_step(self, batch, batch_idx):
            # Manual optimization: fetch both optimizers and drive them by hand.
            opt_a, opt_b = self.optimizers()
            x = batch[0]
            loss_1 = self(x)
            loss_1 = self.loss(loss_1, loss_1)
            # make sure there are no grads
            assert_emtpy_grad(self.layer.weight.grad)
            self.manual_backward(loss_1)
            opt_a.step()
            # fake discriminator
            loss_2 = self(x)
            loss_2 = self.loss(loss_2, loss_2)
            # ensure we forward the correct params to the optimizer
            # without retain_graph we can't do multiple backward passes
            self.manual_backward(loss_2, retain_graph=True)
            self.manual_backward(loss_2, retain_graph=True)
            assert self.layer.weight.grad is not None
            opt_b.step()
            opt_b.zero_grad()
            # `dicttype` is parametrized to check that a custom Mapping return works too.
            return dicttype(loss1=loss_1.detach(), loss2=loss_2.detach())
        # sister test: tests/plugins/test_amp_plugins.py::test_amp_gradient_unscale
        def on_after_backward(self) -> None:
            # check grads are scaled
            scale = self.trainer.precision_plugin.scaler.get_scale()
            assert scale != 1.0  # the return value if not enabled
            grads = [p.grad for p in self.parameters()]
            inv_scale = 1 / scale
            # Keep manually-unscaled copies to compare against after unscaling.
            self.original_grads = [p * inv_scale for p in grads]
        def check_grads_unscaled(self, optimizer=None):
            if optimizer is not None:
                scaler = self.trainer.precision_plugin.scaler
                state = scaler._per_optimizer_states[id(optimizer)]
                assert state["stage"].name == "UNSCALED"
            grads = [p.grad for p in self.parameters()]
            assert len(grads) == len(self.original_grads)
            for actual, expected in zip(grads, self.original_grads):
                torch.testing.assert_close(actual, expected)
        def on_before_optimizer_step(self, optimizer, *_):
            self.check_grads_unscaled(optimizer)
    model = TestModel()
    model.val_dataloader = None
    limit_train_batches = 2
    trainer = Trainer(
        default_root_dir=tmp_path,
        limit_train_batches=limit_train_batches,
        limit_val_batches=2,
        max_epochs=1,
        log_every_n_steps=1,
        enable_model_summary=False,
        precision="16-mixed",
        accelerator="gpu",
        devices=1,
    )
    with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward) as bwd_mock:
        trainer.fit(model)
    # 3 manual_backward calls per batch: one for loss_1, two for loss_2.
    assert bwd_mock.call_count == limit_train_batches * 3
def test_step_with_optimizer_closure(tmp_path):
    """Tests that `step` works with optimizer_closure."""
    seed_everything(1)
    class TestModel(BoringModel):
        _losses = []
        def __init__(self):
            super().__init__()
            self.automatic_optimization = False
        def training_step(self, batch, batch_idx):
            # make sure there are no grads
            assert_emtpy_grad(self.layer.weight.grad)
            opt = self.optimizers()
            def compute_loss():
                x = batch[0]
                x = F.dropout(x, 0.1)
                predictions = self(x)
                predictions = F.dropout(predictions, 0.1)
                return self.loss(predictions)
            def optimizer_closure():
                # emulate bayesian optimization.
                num_backward = 2
                losses = []
                for backward_idx in range(num_backward):
                    loss = compute_loss()
                    losses.append(loss)
                    # retain the graph for every backward pass except the last
                    retain_graph = (num_backward - 1) != backward_idx
                    self.manual_backward(loss, retain_graph=retain_graph)
                # emulate MC dropout training
                loss = torch.stack(losses).mean()
                self._losses.append(loss)
                self.log("train_loss", loss, on_step=True, prog_bar=True, on_epoch=True)
                # dropout makes the two forward passes produce different losses
                assert losses[0] != losses[1]
            weight_before = self.layer.weight.clone()
            opt.step(closure=optimizer_closure)
            opt.zero_grad()
            weight_after = self.layer.weight.clone()
            # the closure must have run and the step must have updated the weights
            assert not torch.equal(weight_before, weight_after)
    model = TestModel()
    limit_train_batches = 2
    trainer = Trainer(
        default_root_dir=tmp_path,
        limit_train_batches=limit_train_batches,
        limit_val_batches=0,
        max_epochs=1,
        log_every_n_steps=1,
    )
    with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward) as bwd_mock:
        trainer.fit(model)
    # 2 backward calls per batch (`num_backward` inside the closure)
    assert bwd_mock.call_count == limit_train_batches * 2
    assert trainer.progress_bar_metrics["train_loss_step"] == model._losses[-1]
    assert trainer.progress_bar_metrics["train_loss_epoch"] == torch.stack(model._losses).mean()
def test_step_with_optimizer_closure_2(tmp_path):
    # Like test_step_with_optimizer_closure, but the loss is computed once
    # outside the closure and backpropagated twice inside it.
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.automatic_optimization = False
        def training_step(self, batch, batch_idx):
            opt = self.optimizers()
            x = batch[0]
            loss = self(x).sum()
            def optimizer_closure():
                # emulate bayesian optimization.
                num_backward = 1
                for backward_idx in range(num_backward + 1):
                    # retain the graph for every backward pass except the last
                    retain_graph = num_backward != backward_idx
                    self.manual_backward(loss, retain_graph=retain_graph)
            weight_before = self.layer.weight.clone()
            opt.step(closure=optimizer_closure)
            weight_after = self.layer.weight.clone()
            assert not torch.equal(weight_before, weight_after)
    model = TestModel()
    limit_train_batches = 4
    trainer = Trainer(
        default_root_dir=tmp_path,
        limit_train_batches=limit_train_batches,
        limit_val_batches=0,
        max_epochs=1,
        log_every_n_steps=1,
    )
    with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward) as bwd_mock:
        trainer.fit(model)
    # two manual_backward calls per batch, one optimizer step per batch
    assert bwd_mock.call_count == limit_train_batches * 2
    assert trainer.global_step == limit_train_batches
@patch("torch.optim.Adam.step")
@patch("torch.optim.SGD.step")
def test_step_with_optimizer_closure_with_different_frequencies(mock_sgd_step, mock_adam_step, tmp_path):
    # Verifies closures and custom `step` kwargs when the two optimizers are
    # stepped at different batch frequencies (GAN-style training).
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.automatic_optimization = False
        def on_train_start(self) -> None:
            mock_sgd_step.reset_mock()
            mock_adam_step.reset_mock()
        def training_step(self, batch, batch_idx):
            # emulate gans training
            opt_gen, opt_dis = self.optimizers()
            # Note: Be careful, don't log on the same key in self.log in both closure
            # as they will be aggregated together on epoch_end
            def compute_loss():
                x = batch[0]
                x = F.dropout(x, 0.1)
                predictions = self(x)
                predictions = F.dropout(predictions, 0.1)
                return self.loss(predictions)
            def gen_closure():
                loss_gen = compute_loss()
                self.log("loss_gen", loss_gen, on_step=True, on_epoch=True)
                self.manual_backward(loss_gen)
            def dis_closure():
                loss_dis = compute_loss()
                self.log("loss_dis", loss_dis, on_step=True, on_epoch=True)
                self.manual_backward(loss_dis)
            # this will accumulate gradients for 2 batches and then call opt_gen.step()
            gen_closure()
            if batch_idx % 2 == 0:
                # passing a custom kwarg
                opt_gen.step(closure=gen_closure, optim="sgd")
                opt_gen.zero_grad()
            # update discriminator every 4 batches
            # therefore, no gradient accumulation for discriminator
            if batch_idx % 4 == 0:
                opt_dis.step(closure=dis_closure)
                opt_dis.zero_grad()
        def configure_optimizers(self):
            optimizer_gen = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            optimizer_dis = torch.optim.Adam(self.layer.parameters(), lr=0.001)
            return [optimizer_gen, optimizer_dis]
    model = TestModel()
    model.val_dataloader = None
    limit_train_batches = 8
    trainer = Trainer(
        default_root_dir=tmp_path,
        limit_train_batches=limit_train_batches,
        limit_val_batches=2,
        max_epochs=1,
        log_every_n_steps=1,
    )
    trainer.fit(model)
    # generator steps on batches 0, 2, 4, 6; discriminator on batches 0 and 4
    assert mock_sgd_step.mock_calls == [call(closure=ANY, optim="sgd") for _ in range(4)]
    assert mock_adam_step.mock_calls == [call(closure=ANY) for _ in range(2)]
    assert trainer.global_step == 4 + 2
| CustomMapping |
python | PrefectHQ__prefect | src/prefect/server/events/storage/__init__.py | {
"start": 354,
"end": 2793
class ____(ValueError):
    # Raised when a pagination page token cannot be decoded or parsed
    # (see `from_page_token` below).
    pass
def to_page_token(
    filter: "EventFilter", count: int, page_size: int, current_offset: int
) -> Optional[str]:
    """Encode the pagination state into an opaque base64 token.

    Returns ``None`` when advancing by ``page_size`` would reach or pass
    ``count``, i.e. when there is no further page.
    """
    next_offset = current_offset + page_size
    if next_offset >= count:
        return None
    payload = {
        "filter": filter.model_dump(mode="json"),
        "count": count,
        "page_size": page_size,
        "offset": next_offset,
    }
    return b64encode(json.dumps(payload).encode()).decode()
def from_page_token(page_token: str) -> Tuple["EventFilter", int, int, int]:
    """Decode a page token back into ``(filter, count, page_size, offset)``."""
    from prefect.server.events.filters import EventFilter

    try:
        decoded = json.loads(b64decode(page_token))
    except Exception:
        # If we can't parse the page token, this likely indicates that something was
        # wrong with the query parameters (perhaps truncated or otherwise manipulated).
        # Treat this as a request for nothing
        raise InvalidTokenError("Unable to parse page token")

    return (
        EventFilter.model_validate(decoded["filter"]),
        decoded["count"],
        decoded["page_size"],
        decoded["offset"],
    )
def process_time_based_counts(
    filter: "EventFilter",
    time_unit: TimeUnit,
    time_interval: float,
    counts: List[EventCount],
) -> List[EventCount]:
    """
    Common logic for processing time-based counts across different event backends.
    When doing time-based counting we want to do two things:
    1. Backfill any missing intervals with 0 counts.
    2. Update the start/end times that are emitted to match the beginning and
    end of the intervals rather than having them reflect the true max/min
    occurred time of the events themselves.
    """
    span_generator = time_unit.get_interval_spans(
        filter.occurred.since, filter.occurred.until, time_interval
    )
    # The generator yields an int first (the number of spans between its pivot
    # and the start of this filter's range), then (start, end) pairs per span.
    spans_since_pivot = next(span_generator)
    assert isinstance(spans_since_pivot, int)
    # One zero-count placeholder per interval of the queried range; `value`
    # holds the local interval index, `label`/start/end the interval bounds.
    backfilled_counts = [
        EventCount(
            value=str(i),
            count=0,
            label=start_time.isoformat(),
            start_time=start_time,
            end_time=end_time,
        )
        for i, (start_time, end_time) in enumerate(span_generator)
    ]
    # Each incoming count's `value` is a stringified absolute span index;
    # shift it by the pivot offset to find its slot in the backfilled list.
    for count in counts:
        index = int(float(count.value)) - spans_since_pivot
        backfilled_counts[index].count = count.count
    return backfilled_counts
| InvalidTokenError |
python | getsentry__sentry | src/sentry/apidocs/examples/session_examples.py | {
"start": 51,
"end": 1104
class ____:
    # Canned OpenAPI response examples for the session-stats API docs.
    # Shows grouped totals plus a per-interval series over a 90-minute window.
    QUERY_SESSIONS = [
        OpenApiExample(
            "Query Sessions",
            value={
                "groups": [
                    {
                        "by": {"session.status": "errored"},
                        "totals": {"sum(session)": 1000},
                        "series": {"sum(session)": [368, 392, 240]},
                    },
                    {
                        "by": {"session.status": "healthy"},
                        "totals": {"sum(session)": 17905998},
                        "series": {"sum(session)": [6230841, 6923689, 4751465]},
                    },
                ],
                "start": "2024-01-29T07:30:00Z",
                "end": "2024-01-29T09:00:00Z",
                "intervals": [
                    "2024-01-29T07:30:00Z",
                    "2024-01-29T08:00:00Z",
                    "2024-01-29T08:30:00Z",
                ],
                "query": "",
            },
            status_codes=["200"],
            response_only=True,
        )
    ]
| SessionExamples |
python | huggingface__transformers | src/transformers/models/gpt_neo/modeling_gpt_neo.py | {
"start": 6920,
"end": 11785
class ____(GPTNeoSelfAttention):
    """
    GPTNeo flash attention module. This module inherits from `GPTNeoSelfAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_past=None,
        use_cache=False,
        output_attentions=False,
        cache_position=None,
    ):
        bsz, _, _ = hidden_states.size()
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)
        # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)
        if layer_past is not None:
            # Append the new K/V to the cache; key/value now cover the full history.
            cache_kwargs = {"cache_position": cache_position}
            key, value = layer_past.update(key, value, self.layer_id, cache_kwargs)
        query_length = query.shape[2]
        tgt_len = key.shape[2]
        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim)
        key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
        value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
        attn_dropout = self.config.attention_dropout if self.training else 0.0
        if attention_mask is not None:  # no matter the length, we just slice it
            attention_mask = attention_mask[:, :, :, : key.shape[-2]]
        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)
        device_type = query.device.type if query.device.type != "mps" else "cpu"
        if query.dtype == torch.float32:
            if torch.is_autocast_enabled():
                # NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4
                target_dtype = (
                    torch.get_autocast_dtype(device_type)
                    if hasattr(torch, "get_autocast_dtype")
                    else torch.get_autocast_gpu_dtype()
                )
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype
            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )
            query = query.to(target_dtype)
            key = key.to(target_dtype)
            value = value.to(target_dtype)
        # NOTE(review): softmax_scale=1.0 disables the usual 1/sqrt(head_dim)
        # scaling — presumably matching GPT-Neo's unscaled attention; confirm
        # against the eager GPTNeoSelfAttention implementation.
        attn_output = _flash_attention_forward(
            query,
            key,
            value,
            attention_mask,
            query_length,
            dropout=attn_dropout,
            softmax_scale=1.0,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )
        # Merge heads back: (batch, seq, heads * head_dim).
        attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim)
        attn_output = self.out_proj(attn_weights_reshaped)
        attn_output = self.resid_dropout(attn_output)
        return attn_output, attn_weights_reshaped
# Dispatch table: attention implementation name -> attention module class.
GPT_NEO_ATTENTION_CLASSES = {
    "eager": GPTNeoSelfAttention,
    "flash_attention_2": GPTNeoFlashAttention2,
}
| GPTNeoFlashAttention2 |
python | django-extensions__django-extensions | django_extensions/management/commands/dumpscript.py | {
"start": 8650,
"end": 15519
class ____(Code):
    """Produces a python script that can recreate data for a given model instance."""
    def __init__(
        self, instance, id, context=None, stdout=None, stderr=None, options=None
    ):
        """We need the instance in question and an id"""
        super().__init__(indent=0, stdout=stdout, stderr=stderr)
        self.imports = {}
        self.options = options
        self.instance = instance
        self.model = self.instance.__class__
        if context is None:
            context = {}
        # Shared dict mapping "ModelName_pk" -> generated variable name (or None).
        self.context = context
        self.variable_name = "%s_%s" % (self.instance._meta.db_table, id)
        self.skip_me = None
        self.instantiated = False
        # Fields not yet emitted; drained by get_waiting_list() as values resolve.
        self.waiting_list = list(self.model._meta.fields)
        self.many_to_many_waiting_list = {}
        for field in self.model._meta.many_to_many:
            try:
                # Skip M2M relations that go through an explicit (non-auto) model;
                # those rows are dumped as instances of the through model itself.
                if not field.remote_field.through._meta.auto_created:
                    continue
            except AttributeError:
                pass
            self.many_to_many_waiting_list[field] = list(
                getattr(self.instance, field.name).all()
            )
    def get_lines(self, force=False):
        """
        Return a list of lists or strings, representing the code body.
        Each list is a block, each string is a statement.
        force (True or False): if an attribute object cannot be included,
        it is usually skipped to be processed later. With 'force' set, there
        will be no waiting: a get_or_create() call is written instead.
        """
        code_lines = []
        # Don't return anything if this is an instance that should be skipped
        if self.skip():
            return []
        # Initialise our new object
        # e.g. model_name_35 = Model()
        code_lines += self.instantiate()
        # Add each field
        # e.g. model_name_35.field_one = 1034.91
        #      model_name_35.field_two = "text"
        code_lines += self.get_waiting_list()
        if force:
            # TODO: Check that M2M are not affected
            code_lines += self.get_waiting_list(force=force)
        # Print the save command for our new object
        # e.g. model_name_35.save()
        if code_lines:
            code_lines.append(
                "%s = importer.save_or_locate(%s)\n"
                % (self.variable_name, self.variable_name)
            )
        code_lines += self.get_many_to_many_lines(force=force)
        return code_lines
    lines = property(get_lines)
    def skip(self):
        """
        Determine whether or not this object should be skipped.
        If this model instance is a parent of a single subclassed
        instance, skip it. The subclassed instance will create this
        parent instance for us.
        TODO: Allow the user to force its creation?
        """
        if self.skip_me is not None:
            return self.skip_me
        cls = self.instance.__class__
        using = router.db_for_write(cls, instance=self.instance)
        collector = Collector(using=using)
        collector.collect([self.instance], collect_related=False)
        sub_objects = sum([list(i) for i in collector.data.values()], [])
        sub_objects_parents = [so._meta.parents for so in sub_objects]
        if [self.model in p for p in sub_objects_parents].count(True) == 1:
            # since this instance isn't explicitly created, it's variable name
            # can't be referenced in the script, so record None in context dict
            pk_name = self.instance._meta.pk.name
            key = "%s_%s" % (self.model.__name__, getattr(self.instance, pk_name))
            self.context[key] = None
            self.skip_me = True
        else:
            self.skip_me = False
        return self.skip_me
    def instantiate(self):
        """Write lines for instantiation"""
        # e.g. model_name_35 = Model()
        code_lines = []
        if not self.instantiated:
            code_lines.append("%s = %s()" % (self.variable_name, self.model.__name__))
            self.instantiated = True
            # Store our variable name for future foreign key references
            pk_name = self.instance._meta.pk.name
            key = "%s_%s" % (self.model.__name__, getattr(self.instance, pk_name))
            self.context[key] = self.variable_name
        return code_lines
    def get_waiting_list(self, force=False):
        """Add lines for any waiting fields that can be completed now."""
        code_lines = []
        skip_autofield = self.options["skip_autofield"]
        # Process normal fields
        for field in list(self.waiting_list):
            try:
                # Find the value, add the line, remove from waiting list and move on
                value = get_attribute_value(
                    self.instance,
                    field,
                    self.context,
                    force=force,
                    skip_autofield=skip_autofield,
                )
                code_lines.append(
                    "%s.%s = %s" % (self.variable_name, field.name, value)
                )
                self.waiting_list.remove(field)
            except SkipValue:
                # Remove from the waiting list and move on
                self.waiting_list.remove(field)
                continue
            except DoLater:
                # Move on, maybe next time
                continue
        return code_lines
    def get_many_to_many_lines(self, force=False):
        """Generate lines that define many to many relations for this instance."""
        lines = []
        for field, rel_items in self.many_to_many_waiting_list.items():
            for rel_item in list(rel_items):
                try:
                    # Related object already emitted: reference it by variable name.
                    pk_name = rel_item._meta.pk.name
                    key = "%s_%s" % (
                        rel_item.__class__.__name__,
                        getattr(rel_item, pk_name),
                    )
                    value = "%s" % self.context[key]
                    lines.append(
                        "%s.%s.add(%s)" % (self.variable_name, field.name, value)
                    )
                    self.many_to_many_waiting_list[field].remove(rel_item)
                except KeyError:
                    # Not emitted yet; with `force`, emit a lookup expression instead
                    # of waiting for the related instance to be written first.
                    if force:
                        item_locator = orm_item_locator(rel_item)
                        self.context["__extra_imports"][rel_item._meta.object_name] = (
                            rel_item.__module__
                        )
                        lines.append(
                            "%s.%s.add( %s )"
                            % (self.variable_name, field.name, item_locator)
                        )
                        self.many_to_many_waiting_list[field].remove(rel_item)
        if lines:
            lines.append("")
        return lines
| InstanceCode |
python | ray-project__ray | python/ray/dashboard/utils.py | {
"start": 14758,
"end": 16177
class ____(Immutable, Sequence):
    """Makes a :class:`list` immutable."""

    __slots__ = ("_list", "_proxy")

    def __init__(self, list_value):
        # Only plain lists (or other immutable lists) may be wrapped.
        if type(list_value) not in (list, ImmutableList):
            raise TypeError(f"{type(list_value)} object is not a list.")
        if isinstance(list_value, ImmutableList):
            list_value = list_value.mutable()
        self._list = list_value
        # Lazily-filled cache of immutable views, one slot per element.
        self._proxy = [None] * len(list_value)

    def __reduce_ex__(self, protocol):
        # Pickle by re-wrapping the underlying mutable list.
        return type(self), (self._list,)

    def mutable(self):
        return self._list

    def __eq__(self, other):
        rhs = other.mutable() if isinstance(other, ImmutableList) else other
        return list.__eq__(self._list, rhs)

    def __ne__(self, other):
        rhs = other.mutable() if isinstance(other, ImmutableList) else other
        return list.__ne__(self._list, rhs)

    def __contains__(self, item):
        needle = item.mutable() if isinstance(item, Immutable) else item
        return list.__contains__(self._list, needle)

    def __getitem__(self, item):
        cached = self._proxy[item]
        if cached is None:
            cached = self._proxy[item] = make_immutable(self._list[item])
        return cached

    def __len__(self):
        return len(self._list)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, list.__repr__(self._list))
| ImmutableList |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 7446,
"end": 7833
class ____(_PartitionerDatetime):
    # Partitioner that splits table data by the year of `column_name`
    # (partitioner method "partition_on_year").
    column_name: str
    sort_ascending: bool = True
    method_name: Literal["partition_on_year"] = "partition_on_year"
    @property
    @override
    def param_names(self) -> List[str]:
        # A batch request addresses one partition via its "year" parameter.
        return ["year"]
    @override
    def partitioner_method_kwargs(self) -> Dict[str, Any]:
        # Keyword arguments forwarded to the engine's partitioner method.
        return {"column_name": self.column_name}
| SqlPartitionerYear |
python | jina-ai__jina | tests/docker_compose/custom-gateway/dummy_gateway.py | {
"start": 361,
"end": 1660
class ____(Gateway):
    # Minimal custom Gateway exposing two HTTP endpoints for the tests.
    def __init__(
        self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
    ):
        super().__init__(**kwargs)
        # Stored so the root endpoint can echo the constructor arguments back.
        self.arg1 = arg1
        self.arg2 = arg2
        self.arg3 = arg3
    async def setup_server(self):
        from fastapi import FastAPI
        app = FastAPI(
            title='Dummy Server',
        )
        @app.get(path='/', response_model=DummyResponseModel)
        def _get_response():
            # Echo the gateway's constructor arguments.
            return {
                'arg1': self.arg1,
                'arg2': self.arg2,
                'arg3': self.arg3,
            }
        @app.get(
            path='/stream',
            response_model=ProcessedResponseModel,
        )
        async def _process(text: str):
            # Stream a single document through the '/debug' executor endpoint
            # and return the resulting document's text and tags.
            doc = None
            async for docs in self.streamer.stream_docs(
                docs=DocumentArray([Document(text=text)]),
                exec_endpoint='/debug',
            ):
                doc = docs[0]
            return {'text': doc.text, 'tags': doc.tags}
        self.server = Server(Config(app, host=self.host, port=self.port))
    async def run_server(self):
        await self.server.serve()
    async def shutdown(self):
        # Ask uvicorn to exit, then wait for it to finish.
        self.server.should_exit = True
        await self.server.shutdown()
| DummyGateway |
python | doocs__leetcode | solution/1500-1599/1515.Best Position for a Service Centre/Solution.py | {
"start": 0,
"end": 807
class ____:
    def getMinDistSum(self, positions: List[List[int]]) -> float:
        """Return the minimal total Euclidean distance from a single point
        to every position (geometric median), found by gradient descent
        with a geometrically decaying step size."""
        count = len(positions)
        cx = cy = 0
        for px, py in positions:
            cx += px
            cy += py
        # Start the search from the centroid.
        cx, cy = cx / count, cy / count
        DECAY = 0.999
        EPS = 1e-6
        step = 0.5
        while True:
            gx = gy = 0
            total = 0
            for px, py in positions:
                ox = cx - px
                oy = cy - py
                d = sqrt(ox * ox + oy * oy)
                # 1e-8 guards against division by zero at a sample point.
                gx += ox / (d + 1e-8)
                gy += oy / (d + 1e-8)
                total += d
            move_x = gx * step
            move_y = gy * step
            cx -= move_x
            cy -= move_y
            step *= DECAY
            if abs(move_x) <= EPS and abs(move_y) <= EPS:
                return total
| Solution |
python | pypa__setuptools | setuptools/command/build.py | {
"start": 462,
"end": 6052
class ____(Protocol):
    """In order to support editable installations (see :pep:`660`) all
    build subcommands **SHOULD** implement this protocol. They also **MUST** inherit
    from ``setuptools.Command``.
    When creating an :pep:`editable wheel <660>`, ``setuptools`` will try to evaluate
    custom ``build`` subcommands using the following procedure:
    1. ``setuptools`` will set the ``editable_mode`` attribute to ``True``
    2. ``setuptools`` will execute the ``run()`` command.
    .. important::
       Subcommands **SHOULD** take advantage of ``editable_mode=True`` to adequate
       its behaviour or perform optimisations.
       For example, if a subcommand doesn't need to generate an extra file and
       all it does is to copy a source file into the build directory,
       ``run()`` **SHOULD** simply "early return".
       Similarly, if the subcommand creates files that would be placed alongside
       Python files in the final distribution, during an editable install
       the command **SHOULD** generate these files "in place" (i.e. write them to
       the original source directory, instead of using the build directory).
       Note that ``get_output_mapping()`` should reflect that and include mappings
       for "in place" builds accordingly.
    3. ``setuptools`` use any knowledge it can derive from the return values of
       ``get_outputs()`` and ``get_output_mapping()`` to create an editable wheel.
       When relevant ``setuptools`` **MAY** attempt to use file links based on the value
       of ``get_output_mapping()``. Alternatively, ``setuptools`` **MAY** attempt to use
       :doc:`import hooks <python:reference/import>` to redirect any attempt to import
       to the directory with the original source code and other files built in place.
    Please note that custom sub-commands **SHOULD NOT** rely on ``run()`` being
    executed (or not) to provide correct return values for ``get_outputs()``,
    ``get_output_mapping()`` or ``get_source_files()``. The ``get_*`` methods should
    work independently of ``run()``.
    """
    # --- Attributes every build subcommand is expected to expose ---
    editable_mode: bool = False
    """Boolean flag that will be set to ``True`` when setuptools is used for an
    editable installation (see :pep:`660`).
    Implementations **SHOULD** explicitly set the default value of this attribute to
    ``False``.
    When subcommands run, they can use this flag to perform optimizations or change
    their behaviour accordingly.
    """
    build_lib: str
    """String representing the directory where the build artifacts should be stored,
    e.g. ``build/lib``.
    For example, if a distribution wants to provide a Python module named ``pkg.mod``,
    then a corresponding file should be written to ``{build_lib}/package/module.py``.
    A way of thinking about this is that the files saved under ``build_lib``
    would be eventually copied to one of the directories in :obj:`site.PREFIXES`
    upon installation.
    A command that produces platform-independent files (e.g. compiling text templates
    into Python functions), **CAN** initialize ``build_lib`` by copying its value from
    the ``build_py`` command. On the other hand, a command that produces
    platform-specific files **CAN** initialize ``build_lib`` by copying its value from
    the ``build_ext`` command. In general this is done inside the ``finalize_options``
    method with the help of the ``set_undefined_options`` command::
        def finalize_options(self):
            self.set_undefined_options("build_py", ("build_lib", "build_lib"))
            ...
    """
    # --- Methods; bodies are intentionally ``...`` (this is a Protocol) ---
    def initialize_options(self) -> None:
        """(Required by the original :class:`setuptools.Command` interface)"""
        ...
    def finalize_options(self) -> None:
        """(Required by the original :class:`setuptools.Command` interface)"""
        ...
    def run(self) -> None:
        """(Required by the original :class:`setuptools.Command` interface)"""
        ...
    def get_source_files(self) -> list[str]:
        """
        Return a list of all files that are used by the command to create the expected
        outputs.
        For example, if your build command transpiles Java files into Python, you should
        list here all the Java files.
        The primary purpose of this function is to help populating the ``sdist``
        with all the files necessary to build the distribution.
        All files should be strings relative to the project root directory.
        """
        ...
    def get_outputs(self) -> list[str]:
        """
        Return a list of files intended for distribution as they would have been
        produced by the build.
        These files should be strings in the form of
        ``"{build_lib}/destination/file/path"``.
        .. note::
           The return value of ``get_output()`` should include all files used as keys
           in ``get_output_mapping()`` plus files that are generated during the build
           and don't correspond to any source file already present in the project.
        """
        ...
    def get_output_mapping(self) -> dict[str, str]:
        """
        Return a mapping between destination files as they would be produced by the
        build (dict keys) into the respective existing (source) files (dict values).
        Existing (source) files should be represented as strings relative to the project
        root directory.
        Destination files should be strings in the form of
        ``"{build_lib}/destination/file/path"``.
        """
        ...
| SubCommand |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/translate.py | {
"start": 2639,
"end": 5696
class ____(GoogleBaseHook):
    """
    Hook for Google Cloud translate APIs.
    All the methods in the hook where project_id is used must be called with
    keyword arguments rather than positional.
    """
    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )
        # Lazily-created client; built and cached on first get_conn() call.
        self._client: Client | None = None
    def get_conn(self) -> Client:
        """
        Retrieve connection to Cloud Translate.
        :return: Google Cloud Translate client object.
        """
        if not self._client:
            self._client = Client(credentials=self.get_credentials(), client_info=CLIENT_INFO)
        return self._client
    # Retries automatically on API quota errors.
    @GoogleBaseHook.quota_retry()
    def translate(
        self,
        values: str | list[str],
        target_language: str,
        format_: str | None = None,
        source_language: str | None = None,
        model: str | list[str] | None = None,
    ) -> dict:
        """
        Translate a string or list of strings.
        See https://cloud.google.com/translate/docs/translating-text
        :param values: String or list of strings to translate.
        :param target_language: The language to translate results into. This
            is required by the API and defaults to
            the target language of the current instance.
        :param format_: (Optional) One of ``text`` or ``html``, to specify
            if the input text is plain text or HTML.
        :param source_language: (Optional) The language of the text to
            be translated.
        :param model: (Optional) The model used to translate the text, such
            as ``'base'`` or ``'NMT'``.
        :returns: A list of dictionaries for each queried value. Each
            dictionary typically contains three keys (though not
            all will be present in all cases)
            * ``detectedSourceLanguage``: The detected language (as an
              ISO 639-1 language code) of the text.
            * ``translatedText``: The translation of the text into the
              target language.
            * ``input``: The corresponding input value.
            * ``model``: The model used to translate the text.
            If only a single value is passed, then only a single
            dictionary will be returned.
        :raises: :class:`~exceptions.ValueError` if the number of
            values and translations differ.
        """
        client = self.get_conn()
        return client.translate(
            values=values,
            target_language=target_language,
            format_=format_,
            source_language=source_language,
            model=model,
        )
| CloudTranslateHook |
python | numpy__numpy | numpy/_core/tests/test_scalarmath.py | {
"start": 33461,
"end": 46217
class ____:
    # Hashing of NumPy scalars must match hashing of the equal Python numbers,
    # so the two can be used interchangeably as dict/set keys.
    @pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
    def test_integer_hashes(self, type_code):
        scalar = np.dtype(type_code).type
        for i in range(128):
            assert hash(i) == hash(scalar(i))
    @pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
    def test_float_and_complex_hashes(self, type_code):
        scalar = np.dtype(type_code).type
        for val in [np.pi, np.inf, 3, 6.]:
            numpy_val = scalar(val)
            # Cast back to Python, in case the NumPy scalar has less precision
            if numpy_val.dtype.kind == 'c':
                val = complex(numpy_val)
            else:
                val = float(numpy_val)
            assert val == numpy_val
            assert hash(val) == hash(numpy_val)
        if hash(float(np.nan)) != hash(float(np.nan)):
            # If Python distinguishes different NaNs we do so too (gh-18833)
            assert hash(scalar(np.nan)) != hash(scalar(np.nan))
    @pytest.mark.parametrize("type_code", np.typecodes['Complex'])
    def test_complex_hashes(self, type_code):
        # Test some complex valued hashes specifically:
        scalar = np.dtype(type_code).type
        for val in [np.pi + 1j, np.inf - 3j, 3j, 6. + 1j]:
            numpy_val = scalar(val)
            assert hash(complex(numpy_val)) == hash(numpy_val)
@contextlib.contextmanager
def recursionlimit(n):
o = sys.getrecursionlimit()
try:
sys.setrecursionlimit(n)
yield
finally:
sys.setrecursionlimit(o)
@given(sampled_from(objecty_things),
sampled_from(binary_operators_for_scalar_ints),
sampled_from(types + [rational]))
@pytest.mark.thread_unsafe(reason="sets recursion limit globally")
def test_operator_object_left(o, op, type_):
try:
with recursionlimit(200):
op(o, type_(1))
except TypeError:
pass
@given(sampled_from(objecty_things),
sampled_from(binary_operators_for_scalar_ints),
sampled_from(types + [rational]))
@pytest.mark.thread_unsafe(reason="sets recursion limit globally")
def test_operator_object_right(o, op, type_):
try:
with recursionlimit(200):
op(type_(1), o)
except TypeError:
pass
@given(sampled_from(binary_operators_for_scalars),
sampled_from(types),
sampled_from(types))
def test_operator_scalars(op, type1, type2):
try:
op(type1(1), type2(1))
except TypeError:
pass
@pytest.mark.parametrize("op", binary_operators_for_scalars)
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
def test_longdouble_operators_with_obj(sctype, op):
# This is/used to be tricky, because NumPy generally falls back to
# using the ufunc via `np.asarray()`, this effectively might do:
# longdouble + None
# -> asarray(longdouble) + np.array(None, dtype=object)
# -> asarray(longdouble).astype(object) + np.array(None, dtype=object)
# And after getting the scalars in the inner loop:
# -> longdouble + None
#
# That would recurse infinitely. Other scalars return the python object
# on cast, so this type of things works OK.
#
# As of NumPy 2.1, this has been consolidated into the np.generic binops
# and now checks `.item()`. That also allows the below path to work now.
try:
op(sctype(3), None)
except TypeError:
pass
try:
op(None, sctype(3))
except TypeError:
pass
@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub])
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
def test_longdouble_with_arrlike(sctype, op):
# As of NumPy 2.1, longdouble behaves like other types and can coerce
# e.g. lists. (Not necessarily better, but consistent.)
assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2])))
assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3))
@pytest.mark.parametrize("op", binary_operators_for_scalars)
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
@np.errstate(all="ignore")
def test_longdouble_operators_with_large_int(sctype, op):
# (See `test_longdouble_operators_with_obj` for why longdouble is special)
# NEP 50 means that the result is clearly a (c)longdouble here:
if sctype == np.clongdouble and op in [operator.mod, operator.floordiv]:
# The above operators are not support for complex though...
with pytest.raises(TypeError):
op(sctype(3), 2**64)
with pytest.raises(TypeError):
op(sctype(3), 2**64)
else:
assert op(sctype(3), -2**64) == op(sctype(3), sctype(-2**64))
assert op(2**64, sctype(3)) == op(sctype(2**64), sctype(3))
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
lambda min, max: max + max,
lambda min, max: min - max,
lambda min, max: max * max], ids=["+", "-", "*"])
def test_scalar_integer_operation_overflow(dtype, operation):
st = np.dtype(dtype).type
min = st(np.iinfo(dtype).min)
max = st(np.iinfo(dtype).max)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, max)
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
@pytest.mark.parametrize("operation", [
lambda min, neg_1: -min,
lambda min, neg_1: abs(min),
lambda min, neg_1: min * neg_1,
pytest.param(lambda min, neg_1: min // neg_1,
marks=pytest.mark.skip(reason="broken on some platforms"))],
ids=["neg", "abs", "*", "//"])
def test_scalar_signed_integer_overflow(dtype, operation):
# The minimum signed integer can "overflow" for some additional operations
st = np.dtype(dtype).type
min = st(np.iinfo(dtype).min)
neg_1 = st(-1)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, neg_1)
@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
def test_scalar_unsigned_integer_overflow(dtype):
val = np.dtype(dtype).type(8)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
-val
zero = np.dtype(dtype).type(0)
-zero # does not warn
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
lambda val, zero: val // zero,
lambda val, zero: val % zero, ], ids=["//", "%"])
def test_scalar_integer_operation_divbyzero(dtype, operation):
st = np.dtype(dtype).type
val = st(100)
zero = st(0)
with pytest.warns(RuntimeWarning, match="divide by zero"):
operation(val, zero)
ops_with_names = [
("__lt__", "__gt__", operator.lt, True),
("__le__", "__ge__", operator.le, True),
("__eq__", "__eq__", operator.eq, True),
# Note __op__ and __rop__ may be identical here:
("__ne__", "__ne__", operator.ne, True),
("__gt__", "__lt__", operator.gt, True),
("__ge__", "__le__", operator.ge, True),
("__floordiv__", "__rfloordiv__", operator.floordiv, False),
("__truediv__", "__rtruediv__", operator.truediv, False),
("__add__", "__radd__", operator.add, False),
("__mod__", "__rmod__", operator.mod, False),
("__mul__", "__rmul__", operator.mul, False),
("__pow__", "__rpow__", operator.pow, False),
("__sub__", "__rsub__", operator.sub, False),
]
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
"""
This test covers scalar subclass deferral. Note that this is exceedingly
complicated, especially since it tends to fall back to the array paths and
these additionally add the "array priority" mechanism.
The behaviour was modified subtly in 1.22 (to make it closer to how Python
scalars work). Due to its complexity and the fact that subclassing NumPy
scalars is probably a bad idea to begin with. There is probably room
for adjustments here.
"""
class myf_simple1(sctype):
pass
class myf_simple2(sctype):
pass
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
# inheritance has to override, or this is correctly lost:
res = op(myf_simple1(1), myf_simple2(2))
assert type(res) == sctype or type(res) == np.bool
assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited
# Two independent subclasses do not really define an order. This could
# be attempted, but we do not since Python's `int` does neither:
assert op(myf_op(1), myf_simple1(2)) == __op__
assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited
def test_longdouble_complex():
# Simple test to check longdouble and complex combinations, since these
# need to go through promotion, which longdouble needs to be careful about.
x = np.longdouble(1)
assert x + 1j == 1 + 1j
assert 1j + x == 1 + 1j
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
# This tests that python scalar subclasses behave like a float64 (if they
# don't override it).
# In an earlier version of NEP 50, they behaved like the Python buildins.
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
# Check that deferring is indicated using `__array_ufunc__`:
myt = type("myt", (subtype,),
{__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
# Just like normally, we should never presume we can modify the float.
assert op(myt(1), np.float64(2)) == __op__
assert op(np.float64(1), myt(2)) == __rop__
if op in {operator.mod, operator.floordiv} and subtype == complex:
return # module is not support for complex. Do not test.
if __rop__ == __op__:
return
# When no deferring is indicated, subclasses are handled normally.
myt = type("myt", (subtype,), {__rop__: rop_func})
behaves_like = lambda x: np.array(subtype(x))[()]
# Check for float32, as a float subclass float64 may behave differently
res = op(myt(1), np.float16(2))
expected = op(behaves_like(1), np.float16(2))
assert res == expected
assert type(res) == type(expected)
res = op(np.float32(2), myt(1))
expected = op(np.float32(2), behaves_like(1))
assert res == expected
assert type(res) == type(expected)
# Same check for longdouble (compare via dtype to accept float64 when
# longdouble has the identical size), which is currently not perfectly
# consistent.
res = op(myt(1), np.longdouble(2))
expected = op(behaves_like(1), np.longdouble(2))
assert res == expected
assert np.dtype(type(res)) == np.dtype(type(expected))
res = op(np.float32(2), myt(1))
expected = op(np.float32(2), behaves_like(1))
assert res == expected
assert np.dtype(type(res)) == np.dtype(type(expected))
def test_truediv_int():
# This should work, as the result is float:
assert np.uint8(3) / 123454 == np.float64(3) / 123454
@pytest.mark.slow
@pytest.mark.parametrize("op",
# TODO: Power is a bit special, but here mostly bools seem to behave oddly
[op for op in binary_operators_for_scalars if op is not operator.pow])
@pytest.mark.parametrize("sctype", types)
@pytest.mark.parametrize("other_type", [float, int, complex])
@pytest.mark.parametrize("rop", [True, False])
def test_scalar_matches_array_op_with_pyscalar(op, sctype, other_type, rop):
# Check that the ufunc path matches by coercing to an array explicitly
val1 = sctype(2)
val2 = other_type(2)
if rop:
_op = op
op = lambda x, y: _op(y, x)
try:
res = op(val1, val2)
except TypeError:
try:
expected = op(np.asarray(val1), val2)
raise AssertionError("ufunc didn't raise.")
except TypeError:
return
else:
expected = op(np.asarray(val1), val2)
# Note that we only check dtype equivalency, as ufuncs may pick the lower
# dtype if they are equivalent.
assert res == expected
if isinstance(val1, float) and other_type is complex and rop:
# Python complex accepts float subclasses, so we don't get a chance
# and the result may be a Python complex (thus, the `np.array()``)
assert np.array(res).dtype == expected.dtype
else:
assert res.dtype == expected.dtype
| TestHash |
python | getsentry__sentry | src/sentry/utils/kvstore/memory.py | {
"start": 297,
"end": 1340
} | class ____(KVStorage[K, V]):
"""
This class provides an in-memory key/value store. It is intended for use
in testing as a lightweight substitute for other backends.
"""
def __init__(self) -> None:
self.__records: MutableMapping[K, Record[V]] = {}
def get(self, key: K) -> V | None:
try:
record = self.__records[key]
except KeyError:
return None
if record.expires_at is not None and datetime.now() > record.expires_at:
del self.__records[key]
return None
return record.value
def set(self, key: K, value: V, ttl: timedelta | None = None) -> None:
self.__records[key] = Record(value, datetime.now() + ttl if ttl is not None else None)
def delete(self, key: K) -> None:
try:
del self.__records[key]
except KeyError:
pass
def bootstrap(self, automatic_expiry: bool = True) -> None:
pass
def destroy(self) -> None:
self.__records.clear()
| MemoryKVStorage |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol29.py | {
"start": 317,
"end": 576
} | class ____(Protocol[_T]):
@property
def func(self) -> Callable[..., _T]: ...
def __new__(
cls: type[Self], __func: Callable[..., _T], *args: Any, **kwargs: Any
) -> Self: ...
def func1(x: Partial[int]): ...
func1(partial(int))
| Partial |
python | coleifer__peewee | tests/apsw_ext.py | {
"start": 162,
"end": 214
} | class ____(TestModel):
username = TextField()
| User |
python | huggingface__transformers | src/transformers/models/llava_next_video/modular_llava_next_video.py | {
"start": 12242,
"end": 12357
} | class ____(LlavaNextPreTrainedModel):
input_modalities = ("image", "video", "text")
| LlavaNextVideoPreTrainedModel |
python | mlflow__mlflow | examples/sktime/flavor.py | {
"start": 19987,
"end": 23456
} | class ____:
def __init__(self, sktime_model):
self.sktime_model = sktime_model
def predict(self, dataframe, params: dict[str, Any] | None = None) -> pd.DataFrame:
df_schema = dataframe.columns.values.tolist()
if len(dataframe) > 1:
raise MlflowException(
f"The provided prediction pd.DataFrame contains {len(dataframe)} rows. "
"Only 1 row should be supplied.",
error_code=INVALID_PARAMETER_VALUE,
)
# Convert the configuration dataframe into a dictionary to simplify the
# extraction of parameters passed to the sktime predcition methods.
attrs = dataframe.to_dict(orient="index").get(0)
predict_method = attrs.get("predict_method")
if not predict_method:
raise MlflowException(
f"The provided prediction configuration pd.DataFrame columns ({df_schema}) do not "
"contain the required column `predict_method` for specifying the prediction method.",
error_code=INVALID_PARAMETER_VALUE,
)
if predict_method not in SUPPORTED_SKTIME_PREDICT_METHODS:
raise MlflowException(
"Invalid `predict_method` value."
f"The supported prediction methods are {SUPPORTED_SKTIME_PREDICT_METHODS}",
error_code=INVALID_PARAMETER_VALUE,
)
# For inference parameters 'fh', 'X', 'coverage', 'alpha', and 'cov'
# the respective sktime default value is used if the value was not
# provided in the configuration dataframe.
fh = attrs.get("fh", None)
# Any model that is trained with exogenous regressor elements will need
# to provide `X` entries as a numpy ndarray to the predict method.
X = attrs.get("X", None)
# When the model is served via REST API the exogenous regressor must be
# provided as a list to the configuration DataFrame to be JSON serializable.
# Below we convert the list back to ndarray type as required by sktime
# predict methods.
if isinstance(X, list):
X = np.array(X)
# For illustration purposes only a subset of the available sktime prediction
# methods is exposed. Additional methods (e.g. predict_proba) could be added
# in a similar fashion.
if predict_method == SKTIME_PREDICT:
predictions = self.sktime_model.predict(fh=fh, X=X)
if predict_method == SKTIME_PREDICT_INTERVAL:
coverage = attrs.get("coverage", 0.9)
predictions = self.sktime_model.predict_interval(fh=fh, X=X, coverage=coverage)
if predict_method == SKTIME_PREDICT_QUANTILES:
alpha = attrs.get("alpha", None)
predictions = self.sktime_model.predict_quantiles(fh=fh, X=X, alpha=alpha)
if predict_method == SKTIME_PREDICT_VAR:
cov = attrs.get("cov", False)
predictions = self.sktime_model.predict_var(fh=fh, X=X, cov=cov)
# Methods predict_interval() and predict_quantiles() return a pandas
# MultiIndex column structure. As MLflow signature inference does not
# support MultiIndex column structure the columns must be flattened.
if predict_method in [SKTIME_PREDICT_INTERVAL, SKTIME_PREDICT_QUANTILES]:
predictions.columns = flatten_multiindex(predictions)
return predictions
| _SktimeModelWrapper |
python | django__django | django/test/client.py | {
"start": 1794,
"end": 2086
} | class ____(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
| RedirectCycleError |
python | numba__numba | numba/tests/test_typedlist.py | {
"start": 28728,
"end": 30521
} | class ____(TestCase):
def test_simple_refine_append(self):
@njit
def foo():
l = List()
l.append(1)
return l
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [1])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_simple_refine_insert(self):
@njit
def foo():
l = List()
l.insert(0, 1)
return l
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [1])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_refine_extend_list(self):
@njit
def foo():
a = List()
b = List()
for i in range(3):
b.append(i)
a.extend(b)
return a
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [0, 1, 2])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_refine_extend_set(self):
@njit
def foo():
l = List()
l.extend((0, 1, 2))
return l
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [0, 1, 2])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_refine_list_extend_iter(self):
@njit
def foo():
l = List()
d = Dict()
d[0] = 0
# d.keys() provides a DictKeysIterableType
l.extend(d.keys())
return l
got = foo()
self.assertEqual(0, got[0])
| TestListInferred |
python | allegroai__clearml | clearml/binding/frameworks/tensorflow_bind.py | {
"start": 109988,
"end": 114951
} | class ____(object):
_current_task = None
__patched = None
@staticmethod
def update_current_task(task: Any, **_: Any) -> None:
PatchTensorflow2ModelIO._current_task = task
if not task:
return
PatchTensorflow2ModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import("tensorflow", PatchTensorflow2ModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint() -> None:
if PatchTensorflow2ModelIO.__patched:
return
if "tensorflow" not in sys.modules:
return
PatchTensorflow2ModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.training.tracking import util # noqa
# noinspection PyBroadException
try:
util.TrackableSaver.save = _patched_call(util.TrackableSaver.save, PatchTensorflow2ModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
util.TrackableSaver.restore = _patched_call(
util.TrackableSaver.restore, PatchTensorflow2ModelIO._restore
)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug("Failed patching tensorflow v2")
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.checkpoint import checkpoint
# noinspection PyBroadException
try:
checkpoint.TrackableSaver.save = _patched_call(
checkpoint.TrackableSaver.save, PatchTensorflow2ModelIO._save
)
except Exception:
pass
# noinspection PyBroadException
try:
checkpoint.TrackableSaver.restore = _patched_call(
checkpoint.TrackableSaver.restore, PatchTensorflow2ModelIO._restore
)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug("Failed patching tensorflow v2.11")
@staticmethod
def _save(original_fn: Callable, self: Any, file_prefix: str, *args: Any, **kwargs: Any) -> Any:
model = original_fn(self, file_prefix, *args, **kwargs)
if not PatchTensorflow2ModelIO._current_task:
return model
# store output Model
# noinspection PyBroadException
try:
WeightsFileHandler.create_output_model(
self,
file_prefix,
Framework.tensorflow,
PatchTensorflow2ModelIO._current_task,
)
except Exception:
pass
return model
@staticmethod
def _restore(original_fn: Callable, self: Any, save_path: str, *args: Any, **kwargs: Any) -> Any:
if not PatchTensorflow2ModelIO._current_task:
return original_fn(self, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
# noinspection PyBroadException
try:
save_path = WeightsFileHandler.restore_weights_file(
self,
save_path,
Framework.tensorflow,
PatchTensorflow2ModelIO._current_task,
)
except Exception:
pass
# load model
return original_fn(self, save_path, *args, **kwargs)
# load model, if something is wrong, exception will be raised before we register the input model
model = original_fn(self, save_path, *args, **kwargs)
# register/load model weights
# noinspection PyBroadException
try:
WeightsFileHandler.restore_weights_file(
self,
save_path,
Framework.tensorflow,
PatchTensorflow2ModelIO._current_task,
)
except Exception:
pass
return model
def tweak_step(step: Union[int, Any]) -> int:
# noinspection PyBroadException
try:
step = int(step.numpy()) if not isinstance(step, int) else step
# unlike other frameworks, tensorflow already accounts for the iteration number
# when continuing the training. we substract the smallest iteration such that we
# don't increment the step twice number
return step - EventTrainsWriter._current_task.get_initial_iteration()
except Exception:
return step
| PatchTensorflow2ModelIO |
python | getsentry__sentry | src/sentry/dashboards/endpoints/organization_dashboards_starred.py | {
"start": 3031,
"end": 4696
} | class ____(OrganizationEndpoint):
publish_status = {"PUT": ApiPublishStatus.PRIVATE}
owner = ApiOwner.DASHBOARDS
permission_classes = (MemberPermission,)
def has_feature(self, organization: Organization, request: Request) -> bool:
return features.has(
"organizations:dashboards-starred-reordering", organization, actor=request.user
)
def put(self, request: Request, organization: Organization) -> Response:
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not self.has_feature(organization, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
serializer = DashboardStarredOrderSerializer(
data=request.data, context={"organization": organization, "user": request.user}
)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
dashboard_ids = serializer.validated_data["dashboard_ids"]
try:
with transaction.atomic(using=router.db_for_write(DashboardFavoriteUser)):
DashboardFavoriteUser.objects.reorder_favorite_dashboards(
organization=organization,
user_id=request.user.id,
new_dashboard_positions=dashboard_ids,
)
except (IntegrityError, ValueError) as e:
sentry_sdk.capture_exception(e)
raise ParseError("Mismatch between existing and provided starred dashboards.")
return Response(status=status.HTTP_204_NO_CONTENT)
| OrganizationDashboardsStarredOrderEndpoint |
python | pytorch__pytorch | torch/utils/_python_dispatch.py | {
"start": 25526,
"end": 25620
} | class ____:
alias_set: set[str]
is_write: bool
name: str | None
@dataclass
| AliasInfo |
python | lepture__authlib | tests/flask/test_oauth2/models.py | {
"start": 300,
"end": 1767
} | class ____(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(40), unique=True, nullable=False)
def get_user_id(self):
return self.id
def check_password(self, password):
return password != "wrong"
def generate_user_info(self, scopes=None):
profile = {
"sub": str(self.id),
"name": self.username,
"given_name": "Jane",
"family_name": "Doe",
"middle_name": "Middle",
"nickname": "Jany",
"preferred_username": "j.doe",
"profile": "https://resource.test/janedoe",
"picture": "https://resource.test/janedoe/me.jpg",
"website": "https://resource.test",
"email": "janedoe@example.com",
"email_verified": True,
"gender": "female",
"birthdate": "2000-12-01",
"zoneinfo": "Europe/Paris",
"locale": "fr-FR",
"phone_number": "+1 (425) 555-1212",
"phone_number_verified": False,
"address": {
"formatted": "742 Evergreen Terrace, Springfield",
"street_address": "742 Evergreen Terrace",
"locality": "Springfield",
"region": "Unknown",
"postal_code": "1245",
"country": "USA",
},
"updated_at": 1745315119,
}
return UserInfo(profile)
| User |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 6958,
"end": 7094
} | class ____(sched.scheduler):
"""Test for broken links for inherited methods, https://github.com/mitmproxy/pdoc/issues/490"""
| scheduler |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 56726,
"end": 59188
} | class ____:
def __init__(self, num_required_devices):
self.num_required_devices = num_required_devices
def __call__(self, fn):
assert not hasattr(fn, "num_required_devices"), (
f"deviceCountAtLeast redefinition for {fn.__name__}"
)
fn.num_required_devices = self.num_required_devices
@wraps(fn)
def multi_fn(slf, devices, *args, **kwargs):
if len(devices) < self.num_required_devices:
reason = f"fewer than {self.num_required_devices} devices detected"
raise unittest.SkipTest(reason)
return fn(slf, devices, *args, **kwargs)
return multi_fn
# Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1)
def onlyNativeDeviceTypes(fn: Callable[_P, _T]) -> Callable[_P, _T]:
@wraps(fn)
def only_fn(self, *args: _P.args, **kwargs: _P.kwargs) -> _T:
if self.device_type not in NATIVE_DEVICES:
reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}"
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return only_fn
# Only runs the test on the native device types and devices specified in the devices list
def onlyNativeDeviceTypesAnd(devices=None):
def decorator(fn):
@wraps(fn)
def only_fn(self, *args, **kwargs):
if (
self.device_type not in NATIVE_DEVICES
and self.device_type not in devices
):
reason = f"onlyNativeDeviceTypesAnd {devices} : doesn't run on {self.device_type}"
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return only_fn
return decorator
# Specifies per-dtype precision overrides.
# Ex.
#
# @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
# @dtypes(torch.half, torch.float, torch.double)
# def test_X(self, device, dtype):
# ...
#
# When the test is instantiated its class's precision will be set to the
# corresponding override, if it exists.
# self.precision can be accessed directly, and it also controls the behavior of
# functions like self.assertEqual().
#
# Note that self.precision is a scalar value, so if you require multiple
# precisions (or are working with multiple dtypes) they should be specified
# explicitly and computed using self.precision (e.g.
# self.precision *2, max(1, self.precision)).
| deviceCountAtLeast |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1214637,
"end": 1214846
} | class ____(AllSortString):
"""SortByChannelDesc schema wrapper."""
_schema = {"$ref": "#/definitions/SortByChannelDesc"}
def __init__(self, *args):
super().__init__(*args)
| SortByChannelDesc |
python | tensorflow__tensorflow | tensorflow/python/training/saver_large_variable_test.py | {
"start": 1175,
"end": 2214
} | class ____(test.TestCase):
# NOTE: This is in a separate file from saver_test.py because the
# large allocations do not play well with TSAN, and cause flaky
# failures.
def testLargeVariable(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
with session.Session("", graph=ops.Graph()) as sess:
# Declare a variable that is exactly 2GB. This should fail,
# because a serialized checkpoint includes other header
# metadata.
with ops.device("/cpu:0"):
var = variables.Variable(
constant_op.constant(
False, shape=[2, 1024, 1024, 1024], dtype=dtypes.bool))
save = saver.Saver(
{
var.op.name: var
}, write_version=saver_pb2.SaverDef.V1)
var.initializer.run()
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Tensor slice is too large to serialize"):
save.save(sess, save_path)
if __name__ == "__main__":
test.main()
| SaverLargeVariableTest |
python | wandb__wandb | wandb/automations/_filters/run_states.py | {
"start": 469,
"end": 848
} | class ____(LenientStrEnum): # from: StateToReport
RUNNING = "RUNNING"
FINISHED = "FINISHED"
FAILED = "FAILED"
# Convenience aliases that are equivalent when *creating* or *editing*
# the triggering event for a run state automation.
# NOTE: These may still be reported as distinct values from an *executed* automation.
CRASHED = FAILED
| ReportedRunState |
python | pytorch__pytorch | test/cpp/jit/tests_setup.py | {
"start": 784,
"end": 1102
} | class ____(FileSetup):
path = "ivalue.pt"
def setup(self):
ones = torch.ones(2, 2)
twos = torch.ones(3, 5) * 2
value = (ones, twos)
torch.save(value, self.path, _use_new_zipfile_serialization=True)
# See testTorchSaveError in test/cpp/jit/tests.h for usage
| SerializationInterop |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_common.py | {
"start": 6931,
"end": 7401
} | class ____(Marker):
"""
An `Marker` value was previously encountered and reported.
A subsequent `Marker` value (this instance) indicates the template may have been truncated as a result.
It will only be visible if the previous `Marker` was ignored/replaced instead of being tripped, which would raise an exception.
"""
__slots__ = ()
def __init__(self) -> None:
super().__init__(hint='template potentially truncated')
| TruncationMarker |
python | sanic-org__sanic | sanic/mixins/base.py | {
"start": 132,
"end": 188
} | class ____(Protocol):
__name__: str
| DunderNameProtocol |
python | tiangolo__fastapi | tests/test_security_openid_connect_description.py | {
"start": 302,
"end": 2469
} | class ____(BaseModel):
username: str
def get_current_user(oauth_header: str = Security(oid)):
user = User(username=oauth_header)
return user
@app.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
return current_user
client = TestClient(app)
def test_security_oauth2():
response = client.get("/users/me", headers={"Authorization": "Bearer footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Bearer footokenbar"}
def test_security_oauth2_password_other_header():
response = client.get("/users/me", headers={"Authorization": "Other footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Other footokenbar"}
def test_security_oauth2_password_bearer_no_header():
response = client.get("/users/me")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "Bearer"
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"OpenIdConnect": []}],
}
}
},
"components": {
"securitySchemes": {
"OpenIdConnect": {
"type": "openIdConnect",
"openIdConnectUrl": "/openid",
"description": "OpenIdConnect security scheme",
}
}
},
}
| User |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/tryExcept1.py | {
"start": 758,
"end": 841
} | class ____(BaseException): ...
base_exceptions = (RuntimeError, NameError)
| Exception1 |
python | pytorch__pytorch | test/test_fake_tensor.py | {
"start": 44849,
"end": 48891
} | class ____(TestCase):
def test_memoized_conversion_to_meta(self):
x = torch.rand(2, 2, 2)
mode = FakeTensorMode()
self.assertTrue(mode.from_tensor(x) is mode.from_tensor(x))
def test_memoized_conversion_from_meta(self):
x = torch.rand(2, 2).to(device="meta")
mode = FakeTensorMode()
converter = mode.fake_tensor_converter
self.assertTrue(
converter.from_meta_and_device(mode, x, "cpu")
is converter.from_meta_and_device(mode, x, "cpu")
)
def test_separate_tensor_storages_view(self):
x = torch.rand(2, 2, 2)
y = x[0]
mode = FakeTensorMode()
converter = mode.fake_tensor_converter
x_conv = converter.from_real_tensor(mode, x)
y_conv = converter.from_real_tensor(mode, y)
self.assertEqual(torch._C._storage_id(x_conv), torch._C._storage_id(y_conv))
@xfailIfTorchDynamo
def test_separate_tensor_storages_non_view(self):
x = torch.rand(2, 2, 2)
y = torch.rand(4, 2)
y.set_(x.storage())
mode = FakeTensorMode()
converter = mode.fake_tensor_converter
x_conv = converter.from_real_tensor(mode, x)
y_conv = converter.from_real_tensor(mode, y)
stor_id = torch._C._storage_id(x_conv)
self.assertEqual(stor_id, torch._C._storage_id(y_conv))
del x
del x_conv
self.assertEqual(len(converter.tensor_memo), 1)
self.assertEqual(len(converter.meta_converter.storage_memo), 1)
del y
del y_conv
self.assertEqual(len(converter.tensor_memo), 0)
self.assertEqual(len(converter.meta_converter.storage_memo), 0)
def test_dead_weak_ref(self):
x = torch.rand(2, 2, 2)
y = x[0]
mode = FakeTensorMode()
converter = FakeTensorConverter()
x_conv = converter.from_real_tensor(mode, x)
x_conv_storage = x_conv.untyped_storage()
del x_conv
self.assertFalse(x in converter.tensor_memo)
y_conv = converter.from_real_tensor(mode, y)
self.assertIs(x_conv_storage, y_conv.untyped_storage())
@xfailIfTorchDynamo
def test_dead_key(self):
x = torch.rand(2, 2, 2)
mode = FakeTensorMode()
converter = FakeTensorConverter()
x_conv = converter.from_real_tensor(mode, x)
self.assertEqual(len(converter.tensor_memo), 1)
x_conv2 = converter.from_real_tensor(mode, x)
assert x_conv2 is x_conv
del x
del x_conv
del x_conv2
self.assertEqual(len(converter.tensor_memo), 0)
def test_no_active_mode(self):
with FakeTensorMode() as mode:
x = torch.empty(2, 2, device="cpu")
y = torch.empty(2, 2, device="cpu")
out = x + y
self.assertEqual(mode, out.fake_mode)
self.assertTrue(isinstance(out, FakeTensor))
self.assertEqual(out.device.type, "cpu")
def test_multiple_modes(self):
t = torch.rand([4])
t2 = torch.rand([4])
with FakeTensorMode() as m:
with FakeTensorMode() as m2:
t_fake = m.from_tensor(t)
t2_fake = m2.from_tensor(t2)
with self.assertRaisesRegex(Exception, "Mixing fake modes"):
t_fake + t2_fake
def test_separate_mode_error(self):
with FakeTensorMode():
x = torch.empty(2, 2, device="cpu")
with FakeTensorMode():
y = torch.empty(2, 2, device="cpu")
self.assertRaises(Exception, lambda: x, y)
@xfailIfTorchDynamo
def test_no_ref_cycle(self):
x = torch.rand([4])
mode = FakeTensorMode()
y = mode.from_tensor(x)
self.assertEqual(len(mode.fake_tensor_converter.tensor_memo), 1)
mode_weak = weakref.ref(mode)
y_weak = weakref.ref(mode)
del mode
del y
assert mode_weak() is None
assert y_weak() is None
make_propagate_real_tensors_cls(FakeTensorConverterTest)
| FakeTensorConverterTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 19630,
"end": 20407
} | class ____(MetadataValue[str], IHaveNew):
"""Container class for path metadata entry data.
Args:
path (str): The path as a string or conforming to os.PathLike.
"""
fspath: str
def __new__(cls, path: Optional[Union[str, PathLike]]):
return super().__new__(
cls,
# coerces to str
fspath=check.opt_path_param(path, "path", default=""),
)
@public
@property
def value(self) -> str:
"""str: The wrapped path."""
return self.fspath
@public
@property
def path(self) -> str: # type: ignore
return self.fspath
@public
@whitelist_for_serdes(storage_name="NotebookMetadataEntryData")
@record_custom(field_to_new_mapping={"fspath": "path"})
| PathMetadataValue |
python | python-openxml__python-docx | src/docx/oxml/table.py | {
"start": 4603,
"end": 8588
} | class ____(BaseOxmlElement):
"""``<w:tbl>`` element."""
add_tr: Callable[[], CT_Row]
tr_lst: list[CT_Row]
tblPr: CT_TblPr = OneAndOnlyOne("w:tblPr") # pyright: ignore[reportAssignmentType]
tblGrid: CT_TblGrid = OneAndOnlyOne("w:tblGrid") # pyright: ignore[reportAssignmentType]
tr = ZeroOrMore("w:tr")
@property
def bidiVisual_val(self) -> bool | None:
"""Value of `./w:tblPr/w:bidiVisual/@w:val` or |None| if not present.
Controls whether table cells are displayed right-to-left or left-to-right.
"""
bidiVisual = self.tblPr.bidiVisual
if bidiVisual is None:
return None
return bidiVisual.val
@bidiVisual_val.setter
def bidiVisual_val(self, value: WD_TABLE_DIRECTION | None):
tblPr = self.tblPr
if value is None:
tblPr._remove_bidiVisual() # pyright: ignore[reportPrivateUsage]
else:
tblPr.get_or_add_bidiVisual().val = bool(value)
@property
def col_count(self):
"""The number of grid columns in this table."""
return len(self.tblGrid.gridCol_lst)
def iter_tcs(self):
"""Generate each of the `w:tc` elements in this table, left to right and top to
bottom.
Each cell in the first row is generated, followed by each cell in the second
row, etc.
"""
for tr in self.tr_lst:
for tc in tr.tc_lst:
yield tc
@classmethod
def new_tbl(cls, rows: int, cols: int, width: Length) -> CT_Tbl:
"""Return a new `w:tbl` element having `rows` rows and `cols` columns.
`width` is distributed evenly between the columns.
"""
return cast(CT_Tbl, parse_xml(cls._tbl_xml(rows, cols, width)))
@property
def tblStyle_val(self) -> str | None:
"""`w:tblPr/w:tblStyle/@w:val` (a table style id) or |None| if not present."""
tblStyle = self.tblPr.tblStyle
if tblStyle is None:
return None
return tblStyle.val
@tblStyle_val.setter
def tblStyle_val(self, styleId: str | None) -> None:
"""Set the value of `w:tblPr/w:tblStyle/@w:val` (a table style id) to `styleId`.
If `styleId` is None, remove the `w:tblStyle` element.
"""
tblPr = self.tblPr
tblPr._remove_tblStyle() # pyright: ignore[reportPrivateUsage]
if styleId is None:
return
tblPr._add_tblStyle().val = styleId # pyright: ignore[reportPrivateUsage]
@classmethod
def _tbl_xml(cls, rows: int, cols: int, width: Length) -> str:
col_width = Emu(width // cols) if cols > 0 else Emu(0)
return (
f"<w:tbl {nsdecls('w')}>\n"
f" <w:tblPr>\n"
f' <w:tblW w:type="auto" w:w="0"/>\n'
f' <w:tblLook w:firstColumn="1" w:firstRow="1"\n'
f' w:lastColumn="0" w:lastRow="0" w:noHBand="0"\n'
f' w:noVBand="1" w:val="04A0"/>\n'
f" </w:tblPr>\n"
f"{cls._tblGrid_xml(cols, col_width)}"
f"{cls._trs_xml(rows, cols, col_width)}"
f"</w:tbl>\n"
)
@classmethod
def _tblGrid_xml(cls, col_count: int, col_width: Length) -> str:
xml = " <w:tblGrid>\n"
for _ in range(col_count):
xml += ' <w:gridCol w:w="%d"/>\n' % col_width.twips
xml += " </w:tblGrid>\n"
return xml
@classmethod
def _trs_xml(cls, row_count: int, col_count: int, col_width: Length) -> str:
return f" <w:tr>\n{cls._tcs_xml(col_count, col_width)} </w:tr>\n" * row_count
@classmethod
def _tcs_xml(cls, col_count: int, col_width: Length) -> str:
return (
f" <w:tc>\n"
f" <w:tcPr>\n"
f' <w:tcW w:type="dxa" w:w="{col_width.twips}"/>\n'
f" </w:tcPr>\n"
f" <w:p/>\n"
f" </w:tc>\n"
) * col_count
| CT_Tbl |
python | ray-project__ray | doc/source/ray-core/doc_code/actor_checkpointing.py | {
"start": 588,
"end": 1713
} | class ____:
def __init__(self):
self.worker = Worker.remote()
self.worker_state = ray.get(self.worker.checkpoint.remote())
def execute_task_with_fault_tolerance(self):
i = 0
while True:
i = i + 1
try:
ray.get(self.worker.execute_task.remote(crash=(i % 2 == 1)))
# Checkpoint the latest worker state
self.worker_state = ray.get(self.worker.checkpoint.remote())
return
except ray.exceptions.RayActorError:
print("Actor crashes, restarting...")
# Restart the actor and restore the state
self.worker = Worker.remote()
ray.get(self.worker.restore.remote(self.worker_state))
controller = Controller()
controller.execute_task_with_fault_tolerance()
controller.execute_task_with_fault_tolerance()
assert ray.get(controller.worker.checkpoint.remote())["num_tasks_executed"] == 2
# __actor_checkpointing_manual_restart_end__
# __actor_checkpointing_auto_restart_begin__
@ray.remote(max_restarts=-1, max_task_retries=-1)
| Controller |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py | {
"start": 14234,
"end": 14774
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
np.int32(0), shape=[3, 3, 3, 3], dtype=dtypes.qint32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
math_ops.quantize_down_and_shrink_range(
input=inputs, input_min=[], input_max=4.0,
out_type=dtypes.quint8))
| QuantizeDownAndShrinkRangeOpTest |
python | urllib3__urllib3 | test/contrib/test_socks.py | {
"start": 24218,
"end": 25928
} | class ____(IPV4SocketDummyServerTestCase):
"""
Test that TLS behaves properly for SOCKS proxies.
"""
@pytest.mark.skipif(not HAS_SSL, reason="No TLS available")
def test_basic_request(self) -> None:
def request_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
handler = handle_socks5_negotiation(sock, negotiate=False)
addr, port = next(handler)
assert addr == b"localhost"
assert port == 443
with pytest.raises(StopIteration):
handler.send(True)
# Wrap in TLS
context = better_ssl.SSLContext(ssl.PROTOCOL_SSLv23) # type: ignore[misc]
context.load_cert_chain(DEFAULT_CERTS["certfile"], DEFAULT_CERTS["keyfile"])
tls = context.wrap_socket(sock, server_side=True)
buf = b""
while True:
buf += tls.recv(65535)
if buf.endswith(b"\r\n\r\n"):
break
assert buf.startswith(b"GET / HTTP/1.1\r\n")
tls.sendall(
b"HTTP/1.1 200 OK\r\n"
b"Server: SocksTestServer\r\n"
b"Content-Length: 0\r\n"
b"\r\n"
)
tls.close()
sock.close()
self._start_server(request_handler)
proxy_url = f"socks5h://{self.host}:{self.port}"
with socks.SOCKSProxyManager(proxy_url, ca_certs=DEFAULT_CA) as pm:
response = pm.request("GET", "https://localhost")
assert response.status == 200
assert response.data == b""
assert response.headers["Server"] == "SocksTestServer"
| TestSOCKSWithTLS |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/collector.py | {
"start": 12953,
"end": 16695
} | class ____:
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_sources() method.
"""
def __init__(
self,
session: PipSession,
search_scope: SearchScope,
index_lookup: Optional[Dict[str, List[str]]] = None,
) -> None:
self.search_scope = search_scope
self.session = session
self.index_lookup = index_lookup if index_lookup else {}
@classmethod
def create(
cls,
session: PipSession,
options: Values,
suppress_no_index: bool = False,
index_lookup: Optional[Dict[str, List[str]]] = None,
) -> "LinkCollector":
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
"Ignoring indexes: %s",
",".join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links,
index_urls=index_urls,
no_index=options.no_index,
index_lookup=index_lookup,
)
link_collector = LinkCollector(
session=session,
search_scope=search_scope,
index_lookup=index_lookup,
)
return link_collector
@property
def find_links(self) -> List[str]:
return self.search_scope.find_links
def fetch_response(self, location: Link) -> Optional[IndexContent]:
"""
Fetch an HTML page containing package links.
"""
return _get_index_content(location, session=self.session)
def collect_sources(
self,
project_name: str,
candidates_from_page: CandidatesFromPage,
) -> CollectedSources:
# The OrderedDict calls deduplicate sources by URL.
index_url_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=False,
cache_link_parsing=False,
project_name=project_name,
)
for loc in self.search_scope.get_index_urls_locations(project_name)
).values()
find_links_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=True,
cache_link_parsing=True,
project_name=project_name,
)
for loc in self.find_links
).values()
if logger.isEnabledFor(logging.DEBUG):
lines = [
f"* {s.link}"
for s in itertools.chain(find_links_sources, index_url_sources)
if s is not None and s.link is not None
]
lines = [
f"{len(lines)} location(s) to search "
f"for versions of {project_name}:"
] + lines
logger.debug("\n".join(lines))
return CollectedSources(
find_links=list(find_links_sources),
index_urls=list(index_url_sources),
)
| LinkCollector |
python | pandas-dev__pandas | pandas/tests/io/json/test_json_table_schema.py | {
"start": 7759,
"end": 23684
} | class ____:
def test_build_series(self):
s = pd.Series([1, 2], name="a")
s.index.name = "id"
result = s.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result["schema"]
result["schema"].pop("pandas_version")
fields = [{"name": "id", "type": "integer"}, {"name": "a", "type": "integer"}]
schema = {"fields": fields, "primaryKey": ["id"]}
expected = OrderedDict(
[
("schema", schema),
(
"data",
[
OrderedDict([("id", 0), ("a", 1)]),
OrderedDict([("id", 1), ("a", 2)]),
],
),
]
)
assert result == expected
def test_read_json_from_to_json_results(self):
# GH32383
df = DataFrame(
{
"_id": {"row_0": 0},
"category": {"row_0": "Goods"},
"recommender_id": {"row_0": 3},
"recommender_name_jp": {"row_0": "浦田"},
"recommender_name_en": {"row_0": "Urata"},
"name_jp": {"row_0": "博多人形(松尾吉将まつお よしまさ)"},
"name_en": {"row_0": "Hakata Dolls Matsuo"},
}
)
result1 = pd.read_json(StringIO(df.to_json()))
result2 = DataFrame.from_dict(json.loads(df.to_json()))
tm.assert_frame_equal(result1, df)
tm.assert_frame_equal(result2, df)
def test_to_json(self, df_table, using_infer_string):
df = df_table
df.index.name = "idx"
result = df.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result["schema"]
result["schema"].pop("pandas_version")
fields = [
{"name": "idx", "type": "integer"},
{"name": "A", "type": "integer"},
{"name": "B", "type": "string"},
{"name": "C", "type": "datetime"},
{"name": "D", "type": "duration"},
{
"constraints": {"enum": ["a", "b", "c"]},
"name": "E",
"ordered": False,
"type": "any",
},
{
"constraints": {"enum": ["a", "b", "c"]},
"name": "F",
"ordered": True,
"type": "any",
},
{"name": "G", "type": "number"},
{"name": "H", "type": "datetime", "tz": "US/Central"},
]
if using_infer_string:
fields[2] = {"name": "B", "type": "string", "extDtype": "str"}
schema = {"fields": fields, "primaryKey": ["idx"]}
data = [
OrderedDict(
[
("idx", 0),
("A", 1),
("B", "a"),
("C", "2016-01-01T00:00:00.000"),
("D", "P0DT1H0M0S"),
("E", "a"),
("F", "a"),
("G", 1.0),
("H", "2016-01-01T06:00:00.000Z"),
]
),
OrderedDict(
[
("idx", 1),
("A", 2),
("B", "b"),
("C", "2016-01-02T00:00:00.000"),
("D", "P0DT1H1M0S"),
("E", "b"),
("F", "b"),
("G", 2.0),
("H", "2016-01-02T06:00:00.000Z"),
]
),
OrderedDict(
[
("idx", 2),
("A", 3),
("B", "c"),
("C", "2016-01-03T00:00:00.000"),
("D", "P0DT1H2M0S"),
("E", "c"),
("F", "c"),
("G", 3.0),
("H", "2016-01-03T06:00:00.000Z"),
]
),
OrderedDict(
[
("idx", 3),
("A", 4),
("B", "c"),
("C", "2016-01-04T00:00:00.000"),
("D", "P0DT1H3M0S"),
("E", "c"),
("F", "c"),
("G", 4.0),
("H", "2016-01-04T06:00:00.000Z"),
]
),
]
expected = OrderedDict([("schema", schema), ("data", data)])
assert result == expected
def test_to_json_float_index(self):
data = pd.Series(1, index=[1.0, 2.0])
result = data.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
result["schema"].pop("pandas_version")
expected = OrderedDict(
[
(
"schema",
{
"fields": [
{"name": "index", "type": "number"},
{"name": "values", "type": "integer"},
],
"primaryKey": ["index"],
},
),
(
"data",
[
OrderedDict([("index", 1.0), ("values", 1)]),
OrderedDict([("index", 2.0), ("values", 1)]),
],
),
]
)
assert result == expected
def test_to_json_period_index(self):
idx = pd.period_range("2016", freq="Q-JAN", periods=2)
data = pd.Series(1, idx)
result = data.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
result["schema"].pop("pandas_version")
fields = [
{"freq": "QE-JAN", "name": "index", "type": "datetime"},
{"name": "values", "type": "integer"},
]
schema = {"fields": fields, "primaryKey": ["index"]}
data = [
OrderedDict([("index", "2015-11-01T00:00:00.000"), ("values", 1)]),
OrderedDict([("index", "2016-02-01T00:00:00.000"), ("values", 1)]),
]
expected = OrderedDict([("schema", schema), ("data", data)])
assert result == expected
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(["a", "b"]))
result = data.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
result["schema"].pop("pandas_version")
expected = OrderedDict(
[
(
"schema",
{
"fields": [
{
"name": "index",
"type": "any",
"constraints": {"enum": ["a", "b"]},
"ordered": False,
},
{"name": "values", "type": "integer"},
],
"primaryKey": ["index"],
},
),
(
"data",
[
OrderedDict([("index", "a"), ("values", 1)]),
OrderedDict([("index", "b"), ("values", 1)]),
],
),
]
)
assert result == expected
def test_date_format_raises(self, df_table):
error_msg = (
"Trying to write with `orient='table'` and `date_format='epoch'`. Table "
"Schema requires dates to be formatted with `date_format='iso'`"
)
warning_msg = (
"'epoch' date format is deprecated and will be removed in a future "
"version, please use 'iso' date format instead."
)
with pytest.raises(ValueError, match=error_msg):
with tm.assert_produces_warning(Pandas4Warning, match=warning_msg):
df_table.to_json(orient="table", date_format="epoch")
# others work
df_table.to_json(orient="table", date_format="iso")
df_table.to_json(orient="table")
def test_convert_pandas_type_to_json_field_int(self, index_or_series):
kind = index_or_series
data = [1, 2, 3]
result = convert_pandas_type_to_json_field(kind(data, name="name"))
expected = {"name": "name", "type": "integer"}
assert result == expected
def test_convert_pandas_type_to_json_field_float(self, index_or_series):
kind = index_or_series
data = [1.0, 2.0, 3.0]
result = convert_pandas_type_to_json_field(kind(data, name="name"))
expected = {"name": "name", "type": "number"}
assert result == expected
@pytest.mark.parametrize(
"dt_args,extra_exp", [({}, {}), ({"utc": True}, {"tz": "UTC"})]
)
@pytest.mark.parametrize("wrapper", [None, pd.Series])
def test_convert_pandas_type_to_json_field_datetime(
self, dt_args, extra_exp, wrapper
):
data = [1.0, 2.0, 3.0]
data = pd.to_datetime(data, **dt_args)
if wrapper is pd.Series:
data = pd.Series(data, name="values")
result = convert_pandas_type_to_json_field(data)
expected = {"name": "values", "type": "datetime"}
expected.update(extra_exp)
assert result == expected
def test_convert_pandas_type_to_json_period_range(self):
arr = pd.period_range("2016", freq="Y-DEC", periods=4)
result = convert_pandas_type_to_json_field(arr)
expected = {"name": "values", "type": "datetime", "freq": "YE-DEC"}
assert result == expected
@pytest.mark.parametrize("kind", [pd.Categorical, pd.CategoricalIndex])
@pytest.mark.parametrize("ordered", [True, False])
def test_convert_pandas_type_to_json_field_categorical(self, kind, ordered):
data = ["a", "b", "c"]
if kind is pd.Categorical:
arr = pd.Series(kind(data, ordered=ordered), name="cats")
elif kind is pd.CategoricalIndex:
arr = kind(data, ordered=ordered, name="cats")
result = convert_pandas_type_to_json_field(arr)
expected = {
"name": "cats",
"type": "any",
"constraints": {"enum": data},
"ordered": ordered,
}
assert result == expected
@pytest.mark.parametrize(
"inp,exp",
[
({"type": "integer"}, "int64"),
({"type": "number"}, "float64"),
({"type": "boolean"}, "bool"),
({"type": "duration"}, "timedelta64"),
({"type": "datetime"}, "datetime64[ns]"),
({"type": "datetime", "tz": "US/Hawaii"}, "datetime64[ns, US/Hawaii]"),
({"type": "any"}, "object"),
(
{
"type": "any",
"constraints": {"enum": ["a", "b", "c"]},
"ordered": False,
},
CategoricalDtype(categories=["a", "b", "c"], ordered=False),
),
(
{
"type": "any",
"constraints": {"enum": ["a", "b", "c"]},
"ordered": True,
},
CategoricalDtype(categories=["a", "b", "c"], ordered=True),
),
({"type": "string"}, None),
],
)
def test_convert_json_field_to_pandas_type(self, inp, exp):
field = {"name": "foo"}
field.update(inp)
assert convert_json_field_to_pandas_type(field) == exp
@pytest.mark.parametrize("inp", ["geopoint", "geojson", "fake_type"])
def test_convert_json_field_to_pandas_type_raises(self, inp):
field = {"type": inp}
with pytest.raises(
ValueError, match=f"Unsupported or invalid field type: {inp}"
):
convert_json_field_to_pandas_type(field)
def test_categorical(self):
s = pd.Series(pd.Categorical(["a", "b", "a"]))
s.index.name = "idx"
result = s.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
result["schema"].pop("pandas_version")
fields = [
{"name": "idx", "type": "integer"},
{
"constraints": {"enum": ["a", "b"]},
"name": "values",
"ordered": False,
"type": "any",
},
]
expected = OrderedDict(
[
("schema", {"fields": fields, "primaryKey": ["idx"]}),
(
"data",
[
OrderedDict([("idx", 0), ("values", "a")]),
OrderedDict([("idx", 1), ("values", "b")]),
OrderedDict([("idx", 2), ("values", "a")]),
],
),
]
)
assert result == expected
@pytest.mark.parametrize(
"idx,nm,prop",
[
(pd.Index([1]), "index", "name"),
(pd.Index([1], name="myname"), "myname", "name"),
(
pd.MultiIndex.from_product([("a", "b"), ("c", "d")]),
["level_0", "level_1"],
"names",
),
(
pd.MultiIndex.from_product(
[("a", "b"), ("c", "d")], names=["n1", "n2"]
),
["n1", "n2"],
"names",
),
(
pd.MultiIndex.from_product(
[("a", "b"), ("c", "d")], names=["n1", None]
),
["n1", "level_1"],
"names",
),
],
)
def test_set_names_unset(self, idx, nm, prop):
data = pd.Series(1, idx)
result = set_default_names(data)
assert getattr(result.index, prop) == nm
@pytest.mark.parametrize(
"idx",
[
pd.Index([], name="index"),
pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("level_0", "level_1")),
pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("foo", "level_1")),
],
)
def test_warns_non_roundtrippable_names(self, idx):
# GH 19130
df = DataFrame(index=idx)
df.index.name = "index"
with tm.assert_produces_warning(UserWarning, match="not round-trippable"):
set_default_names(df)
def test_timestamp_in_columns(self):
df = DataFrame(
[[1, 2]], columns=[pd.Timestamp("2016"), pd.Timedelta(10, unit="s")]
)
result = df.to_json(orient="table")
js = json.loads(result)
assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000"
assert js["schema"]["fields"][2]["name"] == "P0DT0H0M10S"
@pytest.mark.parametrize(
"case",
[
pd.Series([1], index=pd.Index([1], name="a"), name="a"),
DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
DataFrame(
{"A": [1]},
index=pd.MultiIndex.from_arrays([["a"], [1]], names=["A", "a"]),
),
],
)
def test_overlapping_names(self, case):
with pytest.raises(ValueError, match="Overlapping"):
case.to_json(orient="table")
def test_mi_falsey_name(self):
# GH 16203
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=pd.MultiIndex.from_product([("A", "B"), ("a", "b")]),
)
result = [x["name"] for x in build_table_schema(df)["fields"]]
assert result == ["level_0", "level_1", 0, 1, 2, 3]
| TestTableOrient |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/lexer.py | {
"start": 797,
"end": 1202
} | class ____(object):
__slots__ = 'source', 'prev_position'
def __init__(self, source):
self.source = source
self.prev_position = 0
def next_token(self, reset_position=None):
if reset_position is None:
reset_position = self.prev_position
token = read_token(self.source, reset_position)
self.prev_position = token.end
return token
| Lexer |
python | fluentpython__example-code-2e | 15-more-types/cafeteria/cafeteria.py | {
"start": 80,
"end": 132
} | class ____(Beverage):
"""Any fruit juice."""
| Juice |
python | kubernetes-client__python | kubernetes/client/models/v1_self_subject_access_review_spec.py | {
"start": 383,
"end": 4837
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'non_resource_attributes': 'V1NonResourceAttributes',
'resource_attributes': 'V1ResourceAttributes'
}
attribute_map = {
'non_resource_attributes': 'nonResourceAttributes',
'resource_attributes': 'resourceAttributes'
}
def __init__(self, non_resource_attributes=None, resource_attributes=None, local_vars_configuration=None): # noqa: E501
"""V1SelfSubjectAccessReviewSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._non_resource_attributes = None
self._resource_attributes = None
self.discriminator = None
if non_resource_attributes is not None:
self.non_resource_attributes = non_resource_attributes
if resource_attributes is not None:
self.resource_attributes = resource_attributes
@property
def non_resource_attributes(self):
"""Gets the non_resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
:return: The non_resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
:rtype: V1NonResourceAttributes
"""
return self._non_resource_attributes
@non_resource_attributes.setter
def non_resource_attributes(self, non_resource_attributes):
"""Sets the non_resource_attributes of this V1SelfSubjectAccessReviewSpec.
:param non_resource_attributes: The non_resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
:type: V1NonResourceAttributes
"""
self._non_resource_attributes = non_resource_attributes
@property
def resource_attributes(self):
"""Gets the resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
:return: The resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
:rtype: V1ResourceAttributes
"""
return self._resource_attributes
@resource_attributes.setter
def resource_attributes(self, resource_attributes):
"""Sets the resource_attributes of this V1SelfSubjectAccessReviewSpec.
:param resource_attributes: The resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
:type: V1ResourceAttributes
"""
self._resource_attributes = resource_attributes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SelfSubjectAccessReviewSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SelfSubjectAccessReviewSpec):
return True
return self.to_dict() != other.to_dict()
| V1SelfSubjectAccessReviewSpec |
python | walkccc__LeetCode | solutions/1373. Maximum Sum BST in Binary Tree/1373.py | {
"start": 47,
"end": 161
} | class ____:
isBST: bool | None = False
mx: int | None = None
mn: int | None = None
summ: int | None = None
| T |
python | great-expectations__great_expectations | tests/integration/fluent/test_integration_datasource.py | {
"start": 21883,
"end": 23768
} | class ____:
context: EphemeralDataContext
datasource: SparkDatasource
dataframe: SparkDataFrame
def _validate_whole_dataframe_batch_definition(
context_source_frame: ContextPandasDataSourceAndFrame | ContextSparkDataSourceAndFrame,
):
asset = context_source_frame.datasource.add_dataframe_asset(name="asset")
bd = asset.add_batch_definition_whole_dataframe(name="bd")
suite = context_source_frame.context.suites.add(gx.ExpectationSuite(name="suite"))
suite.add_expectation(
gxe.ExpectColumnMeanToBeBetween(column="column_name", min_value=2.5, max_value=3.5)
)
validation_def = context_source_frame.context.validation_definitions.add(
gx.ValidationDefinition(
name="vd",
data=bd,
suite=suite,
)
)
result = validation_def.run(batch_parameters={"dataframe": context_source_frame.dataframe})
assert result.success
@pytest.mark.unit
def test_validate_pandas_batch_definition():
context = gx.get_context(mode="ephemeral")
datasource = context.data_sources.add_pandas(name="ds")
df = pd.DataFrame({"column_name": [1, 2, 3, 4, 5]})
_validate_whole_dataframe_batch_definition(
ContextPandasDataSourceAndFrame(context=context, datasource=datasource, dataframe=df)
)
@pytest.mark.spark
def test_validate_spark_batch_definition(
spark_session: SparkSession,
spark_df_from_pandas_df: Callable[[SparkSession, pd.DataFrame], SparkDataFrame],
):
context = gx.get_context(mode="ephemeral")
datasource = context.data_sources.add_spark(name="ds")
spark_df = spark_df_from_pandas_df(
spark_session, pd.DataFrame({"column_name": [1, 2, 3, 4, 5]})
)
_validate_whole_dataframe_batch_definition(
ContextSparkDataSourceAndFrame(context=context, datasource=datasource, dataframe=spark_df)
)
| ContextSparkDataSourceAndFrame |
python | kamyu104__LeetCode-Solutions | Python/minimum-rectangles-to-cover-points.py | {
"start": 48,
"end": 469
} | class ____(object):
def minRectanglesToCoverPoints(self, points, w):
"""
:type points: List[List[int]]
:type w: int
:rtype: int
"""
points.sort(key=lambda x: x[0])
result = 0
left = -(w+1)
for right, _ in points:
if right-left <= w:
continue
left = right
result += 1
return result
| Solution |
python | django__django | tests/composite_pk/tests.py | {
"start": 10341,
"end": 16914
} | class ____(TestCase):
fixtures = ["tenant"]
def test_objects(self):
tenant_1, tenant_2, tenant_3 = Tenant.objects.order_by("pk")
self.assertEqual(tenant_1.id, 1)
self.assertEqual(tenant_1.name, "Tenant 1")
self.assertEqual(tenant_2.id, 2)
self.assertEqual(tenant_2.name, "Tenant 2")
self.assertEqual(tenant_3.id, 3)
self.assertEqual(tenant_3.name, "Tenant 3")
user_1, user_2, user_3, user_4 = User.objects.order_by("pk")
self.assertEqual(user_1.id, 1)
self.assertEqual(user_1.tenant_id, 1)
self.assertEqual(user_1.pk, (user_1.tenant_id, user_1.id))
self.assertEqual(user_1.email, "user0001@example.com")
self.assertEqual(user_2.id, 2)
self.assertEqual(user_2.tenant_id, 1)
self.assertEqual(user_2.pk, (user_2.tenant_id, user_2.id))
self.assertEqual(user_2.email, "user0002@example.com")
self.assertEqual(user_3.id, 3)
self.assertEqual(user_3.tenant_id, 2)
self.assertEqual(user_3.pk, (user_3.tenant_id, user_3.id))
self.assertEqual(user_3.email, "user0003@example.com")
self.assertEqual(user_4.id, 4)
self.assertEqual(user_4.tenant_id, 2)
self.assertEqual(user_4.pk, (user_4.tenant_id, user_4.id))
self.assertEqual(user_4.email, "user0004@example.com")
post_1, post_2 = Post.objects.order_by("pk")
self.assertEqual(post_1.id, UUID("11111111-1111-1111-1111-111111111111"))
self.assertEqual(post_1.tenant_id, 2)
self.assertEqual(post_1.pk, (post_1.tenant_id, post_1.id))
self.assertEqual(post_2.id, UUID("ffffffff-ffff-ffff-ffff-ffffffffffff"))
self.assertEqual(post_2.tenant_id, 2)
self.assertEqual(post_2.pk, (post_2.tenant_id, post_2.id))
def assert_deserializer(self, format, users, serialized_users):
deserialized_user = list(serializers.deserialize(format, serialized_users))[0]
self.assertEqual(deserialized_user.object.email, users[0].email)
self.assertEqual(deserialized_user.object.id, users[0].id)
self.assertEqual(deserialized_user.object.tenant, users[0].tenant)
self.assertEqual(deserialized_user.object.pk, users[0].pk)
def test_serialize_user_json(self):
users = User.objects.filter(pk=(1, 1))
result = serializers.serialize("json", users)
self.assertEqual(
json.loads(result),
[
{
"model": "composite_pk.user",
"pk": [1, 1],
"fields": {
"email": "user0001@example.com",
"id": 1,
"tenant": 1,
},
}
],
)
self.assert_deserializer(format="json", users=users, serialized_users=result)
def test_serialize_user_jsonl(self):
users = User.objects.filter(pk=(1, 2))
result = serializers.serialize("jsonl", users)
self.assertEqual(
json.loads(result),
{
"model": "composite_pk.user",
"pk": [1, 2],
"fields": {
"email": "user0002@example.com",
"id": 2,
"tenant": 1,
},
},
)
self.assert_deserializer(format="jsonl", users=users, serialized_users=result)
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
def test_serialize_user_yaml(self):
users = User.objects.filter(pk=(2, 3))
result = serializers.serialize("yaml", users)
self.assertEqual(
yaml.safe_load(result),
[
{
"model": "composite_pk.user",
"pk": [2, 3],
"fields": {
"email": "user0003@example.com",
"id": 3,
"tenant": 2,
},
},
],
)
self.assert_deserializer(format="yaml", users=users, serialized_users=result)
def test_serialize_user_python(self):
users = User.objects.filter(pk=(2, 4))
result = serializers.serialize("python", users)
self.assertEqual(
result,
[
{
"model": "composite_pk.user",
"pk": [2, 4],
"fields": {
"email": "user0004@example.com",
"id": 4,
"tenant": 2,
},
},
],
)
self.assert_deserializer(format="python", users=users, serialized_users=result)
def test_serialize_user_xml(self):
users = User.objects.filter(pk=(1, 1))
result = serializers.serialize("xml", users)
self.assertIn('<object model="composite_pk.user" pk=\'["1", "1"]\'>', result)
self.assert_deserializer(format="xml", users=users, serialized_users=result)
def test_serialize_post_uuid(self):
posts = Post.objects.filter(pk=(2, "11111111-1111-1111-1111-111111111111"))
result = serializers.serialize("json", posts)
self.assertEqual(
json.loads(result),
[
{
"model": "composite_pk.post",
"pk": [2, "11111111-1111-1111-1111-111111111111"],
"fields": {
"id": "11111111-1111-1111-1111-111111111111",
"tenant": 2,
},
},
],
)
def test_serialize_datetime(self):
result = serializers.serialize("json", TimeStamped.objects.all())
self.assertEqual(
json.loads(result),
[
{
"model": "composite_pk.timestamped",
"pk": [1, "2022-01-12T05:55:14.956"],
"fields": {
"id": 1,
"created": "2022-01-12T05:55:14.956",
"text": "",
},
},
],
)
def test_invalid_pk_extra_field(self):
json = (
'[{"fields": {"email": "user0001@example.com", "id": 1, "tenant": 1}, '
'"pk": [1, 1, "extra"], "model": "composite_pk.user"}]'
)
with self.assertRaises(serializers.base.DeserializationError):
next(serializers.deserialize("json", json))
| CompositePKFixturesTests |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_linear_base.py | {
"start": 180,
"end": 373
} | class ____(FlowSpec):
"""Base class with parameters"""
alpha = Parameter("alpha", help="Alpha parameter", default=10)
beta = Parameter("beta", help="Beta parameter", default=5)
| BaseA |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 22722,
"end": 23149
} | class ____(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
newer version of ``astropy.io.votable``. This may cause problems or limited
features performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = (
"The VO catalog database is for a later version of astropy.io.votable"
)
| W24 |
python | huggingface__transformers | src/transformers/models/phimoe/modeling_phimoe.py | {
"start": 27002,
"end": 33807
} | class ____(PhimoePreTrainedModel):
def __init__(self, config: PhimoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[PhimoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True)
self.rotary_emb = PhimoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
@auto_docstring
| PhimoeModel |
python | encode__django-rest-framework | rest_framework/renderers.py | {
"start": 8943,
"end": 14669
} | class ____(BaseRenderer):
"""
Renderers serializer data into an HTML form.
If the serializer was instantiated without an object then this will
return an HTML form not bound to any object,
otherwise it will return an HTML form with the appropriate initial data
populated from the object.
Note that rendering of field and form errors is not currently supported.
"""
media_type = 'text/html'
format = 'form'
charset = 'utf-8'
template_pack = 'rest_framework/vertical/'
base_template = 'form.html'
default_style = ClassLookupDict({
serializers.Field: {
'base_template': 'input.html',
'input_type': 'text'
},
serializers.EmailField: {
'base_template': 'input.html',
'input_type': 'email'
},
serializers.URLField: {
'base_template': 'input.html',
'input_type': 'url'
},
serializers.IntegerField: {
'base_template': 'input.html',
'input_type': 'number'
},
serializers.FloatField: {
'base_template': 'input.html',
'input_type': 'number'
},
serializers.DateTimeField: {
'base_template': 'input.html',
'input_type': 'datetime-local'
},
serializers.DateField: {
'base_template': 'input.html',
'input_type': 'date'
},
serializers.TimeField: {
'base_template': 'input.html',
'input_type': 'time'
},
serializers.FileField: {
'base_template': 'input.html',
'input_type': 'file'
},
serializers.BooleanField: {
'base_template': 'checkbox.html'
},
serializers.ChoiceField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.MultipleChoiceField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.RelatedField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.ManyRelatedField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.Serializer: {
'base_template': 'fieldset.html'
},
serializers.ListSerializer: {
'base_template': 'list_fieldset.html'
},
serializers.ListField: {
'base_template': 'list_field.html'
},
serializers.DictField: {
'base_template': 'dict_field.html'
},
serializers.FilePathField: {
'base_template': 'select.html',
},
serializers.JSONField: {
'base_template': 'textarea.html',
},
})
def render_field(self, field, parent_style):
if isinstance(field._field, serializers.HiddenField):
return ''
style = self.default_style[field].copy()
style.update(field.style)
if 'template_pack' not in style:
style['template_pack'] = parent_style.get('template_pack', self.template_pack)
style['renderer'] = self
# Get a clone of the field with text-only value representation ('' if None or False).
field = field.as_form_field()
if style.get('input_type') == 'datetime-local':
try:
format_ = field._field.format
except AttributeError:
format_ = api_settings.DATETIME_FORMAT
if format_ is not None:
# field.value is expected to be a string
# https://www.django-rest-framework.org/api-guide/fields/#datetimefield
field_value = field.value
if format_ == ISO_8601 and sys.version_info < (3, 11):
# We can drop this branch once we drop support for Python < 3.11
# https://docs.python.org/3/whatsnew/3.11.html#datetime
field_value = field_value.rstrip('Z')
field.value = (
datetime.datetime.fromisoformat(field_value) if format_ == ISO_8601
else datetime.datetime.strptime(field_value, format_)
)
# The format of an input type="datetime-local" is "yyyy-MM-ddThh:mm"
# followed by optional ":ss" or ":ss.SSS", so keep only the first three
# digits of milliseconds to avoid browser console error.
field.value = field.value.replace(tzinfo=None).isoformat(timespec="milliseconds")
if 'template' in style:
template_name = style['template']
else:
template_name = style['template_pack'].strip('/') + '/' + style['base_template']
template = loader.get_template(template_name)
context = {'field': field, 'style': style}
return template.render(context)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render serializer data and return an HTML form, as a string.
"""
renderer_context = renderer_context or {}
form = data.serializer
style = renderer_context.get('style', {})
if 'template_pack' not in style:
style['template_pack'] = self.template_pack
style['renderer'] = self
template_pack = style['template_pack'].strip('/')
template_name = template_pack + '/' + self.base_template
template = loader.get_template(template_name)
context = {
'form': form,
'style': style
}
return template.render(context)
| HTMLFormRenderer |
python | Netflix__metaflow | test/core/tests/dynamic_parameters.py | {
"start": 67,
"end": 1928
} | class ____(MetaflowTest):
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
PARAMETERS = {
"str_param": {"default": "str_func"},
"json_param": {"default": "json_func", "type": "JSONType"},
"nondefault_param": {"default": "lambda _: True", "type": "bool"},
}
HEADER = """
import os
os.environ['METAFLOW_RUN_NONDEFAULT_PARAM'] = 'False'
def str_func(ctx):
import os
from metaflow import current
assert_equals(current.project_name, 'dynamic_parameters_project')
assert_equals(ctx.parameter_name, 'str_param')
assert_equals(ctx.flow_name, 'DynamicParameterTestFlow')
assert_equals(ctx.user_name, os.environ['METAFLOW_USER'])
if os.path.exists('str_func.only_once'):
raise Exception("Dynamic parameter function invoked multiple times!")
with open('str_func.only_once', 'w') as f:
f.write('foo')
return 'does this work?'
def json_func(ctx):
import json
return json.dumps({'a': [8]})
@project(name='dynamic_parameters_project')
"""
@steps(0, ["singleton"], required=True)
def step_single(self):
assert_equals(self.str_param, "does this work?")
assert_equals(self.nondefault_param, False)
assert_equals(self.json_param, {"a": [8]})
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
for step in flow:
checker.assert_artifact(step.name, "nondefault_param", False)
checker.assert_artifact(step.name, "str_param", "does this work?")
checker.assert_artifact(step.name, "json_param", {"a": [8]})
| DynamicParameterTest |
python | ray-project__ray | python/ray/data/_internal/metadata_exporter.py | {
"start": 1292,
"end": 1527
} | class ____:
"""Represents a sub-stage within an operator in the DAG.
Attributes:
name: The name of the sub-stage.
id: The unique identifier of the sub-stage.
"""
name: str
id: str
@dataclass
| SubStage |
python | huggingface__transformers | tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py | {
"start": 8777,
"end": 12546
} | class ____(unittest.TestCase):
def test_inference_wo_prompt_depth(self):
image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
model = PromptDepthAnythingForDepthEstimation.from_pretrained(
"depth-anything/prompt-depth-anything-vits-hf"
).to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
expected_shape = torch.Size([1, 756, 1008])
self.assertEqual(predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[0.5029, 0.5120, 0.5176], [0.4998, 0.5147, 0.5197], [0.4973, 0.5201, 0.5241]]
).to(torch_device)
self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-3))
def test_inference(self):
image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
model = PromptDepthAnythingForDepthEstimation.from_pretrained(
"depth-anything/prompt-depth-anything-vits-hf"
).to(torch_device)
image = prepare_img()
prompt_depth = prepare_prompt_depth()
inputs = image_processor(images=image, return_tensors="pt", prompt_depth=prompt_depth).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
expected_shape = torch.Size([1, 756, 1008])
self.assertEqual(predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[3.0100, 3.0016, 3.0219], [3.0046, 3.0137, 3.0275], [3.0083, 3.0191, 3.0292]]
).to(torch_device)
self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-3))
@pytest.mark.torch_export_test
def test_export(self):
for strict in [False, True]:
if strict and get_torch_major_and_minor_version() == "2.7":
self.skipTest(reason="`strict=True` is currently failing with torch 2.7.")
with self.subTest(strict=strict):
if not is_torch_greater_or_equal_than_2_4:
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = (
PromptDepthAnythingForDepthEstimation.from_pretrained(
"depth-anything/prompt-depth-anything-vits-hf"
)
.to(torch_device)
.eval()
)
image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
image = prepare_img()
prompt_depth = prepare_prompt_depth()
inputs = image_processor(images=image, prompt_depth=prompt_depth, return_tensors="pt").to(torch_device)
exported_program = torch.export.export(
model,
args=(inputs["pixel_values"], inputs["prompt_depth"]),
strict=strict,
)
with torch.no_grad():
eager_outputs = model(**inputs)
exported_outputs = exported_program.module().forward(
inputs["pixel_values"], inputs["prompt_depth"]
)
self.assertEqual(eager_outputs.predicted_depth.shape, exported_outputs.predicted_depth.shape)
self.assertTrue(
torch.allclose(eager_outputs.predicted_depth, exported_outputs.predicted_depth, atol=1e-4)
)
| PromptDepthAnythingModelIntegrationTest |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 172804,
"end": 174002
} | class ____(Response):
"""
Response of tasks.edit_hyper_params endpoint.
:param updated: Indicates if the task was updated successfully
:type updated: int
"""
_service = "tasks"
_action = "edit_hyper_params"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(EditHyperParamsResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| EditHyperParamsResponse |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py | {
"start": 8005,
"end": 8082
} | class ____(HunYuanDenseV1RotaryEmbedding):
pass
| HunYuanMoEV1RotaryEmbedding |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py | {
"start": 3431,
"end": 9233
} | class ____(BatchExpectation):
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py docstring">
"""Expect batch to contain columns with unique contents."""
# </snippet>
strict: bool = True
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py examples">
examples = [
{
"dataset_name": "expect_batch_columns_to_be_unique_1",
"data": {
"col1": [1, 2, 3, 4, 5],
"col2": [2, 3, 4, 5, 6],
"col3": [3, 4, 5, 6, 7],
},
"tests": [
{
"title": "strict_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"strict": True},
"out": {"success": True},
}
],
},
{
"dataset_name": "expect_batch_columns_to_be_unique_2",
"data": {
"col1": [1, 2, 3, 4, 5],
"col2": [1, 2, 3, 4, 5],
"col3": [3, 4, 5, 6, 7],
},
"tests": [
{
"title": "loose_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"strict": False},
"out": {"success": True},
},
{
"title": "strict_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"strict": True},
"out": {"success": False},
},
],
},
]
# </snippet>
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py metric_dependencies">
metric_dependencies = ("table.columns.unique", "table.columns")
# </snippet>
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = ("strict",)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
strict = configuration.kwargs.get("strict")
# Check other things in configuration.kwargs and raise Exceptions if needed
try:
assert (
isinstance(strict, bool) or strict is None
), "strict must be a boolean value"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py validate">
def _validate(
self,
metrics: Dict,
runtime_configuration: dict | None = None,
execution_engine: ExecutionEngine | None = None,
):
unique_columns = metrics.get("table.columns.unique")
batch_columns = metrics.get("table.columns")
strict = self.configuration.kwargs.get("strict")
duplicate_columns = unique_columns.symmetric_difference(batch_columns)
if strict is True:
success = len(duplicate_columns) == 0
else:
success = len(duplicate_columns) < len(batch_columns)
return {
"success": success,
"result": {"observed_value": {"duplicate_columns": duplicate_columns}},
}
# </snippet>
# This dictionary contains metadata for display in the public gallery
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py library_metadata">
library_metadata = {
"tags": ["uniqueness"],
"contributors": ["@joegargery"],
}
# </snippet>
if __name__ == "__main__":
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_batch_columns_to_be_unique.py diagnostics">
ExpectBatchColumnsToBeUnique().print_diagnostic_checklist()
# </snippet>
# Note to users: code below this line is only for integration testing -- ignore!
diagnostics = ExpectBatchColumnsToBeUnique().run_diagnostics()
for check in diagnostics["tests"]:
assert check["test_passed"] is True
assert check["error_diagnostics"] is None
for check in diagnostics["errors"]:
assert check is None
for check in diagnostics["maturity_checklist"]["experimental"]:
if check["message"] == "Passes all linting checks":
continue
assert check["passed"] is True
| ExpectBatchColumnsToBeUnique |
python | allegroai__clearml | clearml/utilities/pigar/__main__.py | {
"start": 356,
"end": 8897
} | class ____(object):
_force_modules_reqs = dict()
def __init__(
self,
save_path: str,
project_path: str,
ignores: list,
installed_pkgs: dict,
comparison_operator: str = "==",
) -> None:
self._save_path = save_path
self._project_path = project_path
self._ignores = ignores
self._installed_pkgs = installed_pkgs
self._maybe_local_mods = set()
self._local_mods = dict()
self._relative_imports = set()
self._comparison_operator = comparison_operator
def extract_reqs(
self,
module_callback: Callable[[Set[str]], Set[str]] = None,
entry_point_filename: Optional[str] = None,
) -> Tuple[ReqsModules, Set[str], ReqsModules, ReqsModules]:
"""Extract requirements from project."""
def _internal_create_req(
_version: str, _pkg_name: str, _name: str, _modules: ReqsModules
) -> Tuple[str, str, Any]:
if _name not in _modules:
_modules.add(_name, _name, 0)
if not _version and _pkg_name and _pkg_name.startswith("-e "):
return (
"{} @ {}".format(_name, _pkg_name.replace("-e ", "", 1)),
_version,
_modules[_name],
)
else:
return (_pkg_name, _version, _modules[_name])
reqs = ReqsModules()
guess = ReqsModules()
local = ReqsModules()
num_local_mod = 0
if self.__module__:
# create a copy, do not change the class set
our_module = self.__module__.split(".")[0]
if our_module and our_module not in self._force_modules_reqs:
from ...version import __version__
self._force_modules_reqs[our_module] = __version__
# make the entry point absolute (relative to the root path)
if entry_point_filename and not os.path.isabs(entry_point_filename):
entry_point_filename = (
os.path.join(self._project_path, entry_point_filename) if os.path.isdir(self._project_path) else None
)
# check if the entry point script is self contained, i.e. does not use the rest of the project
if entry_point_filename and os.path.isfile(entry_point_filename) and not self._local_mods:
modules, try_imports, local_mods = project_import_modules(entry_point_filename, self._ignores)
if not local_mods:
# update the self._local_mods
self._filter_modules(modules, local_mods)
# check how many local modules we have, excluding ourselves
num_local_mod = len(set(self._local_mods.keys()) - set(self._force_modules_reqs.keys()))
# if we have any module/package we cannot find, take no chances and scan the entire project
# if we have local modules and they are not just us.
if num_local_mod or local_mods or self._relative_imports:
modules, try_imports, local_mods = project_import_modules(self._project_path, self._ignores)
else:
modules, try_imports, local_mods = project_import_modules(self._project_path, self._ignores)
if module_callback:
modules = module_callback(modules)
# Filtering modules
candidates = self._filter_modules(modules, local_mods)
# make sure we are in candidates
candidates |= set(self._force_modules_reqs.keys())
logger.info("Check module in local environment.")
reqs_module_name = []
for name in candidates:
logger.info("Checking module: %s", name)
if name in self._installed_pkgs:
pkginfo = self._installed_pkgs[name]
for pkg, (pkg_name, version) in ({"": pkginfo} if not isinstance(pkginfo, dict) else pkginfo).items():
_name = pkg or name
reqs.add(*_internal_create_req(version, pkg_name, _name, modules))
reqs_module_name.append(_name)
elif name in modules:
guess.add(name, 0, modules[name])
# add local modules, so we know what is used but not installed.
project_path = os.path.realpath(self._project_path)
for name in self._local_mods:
if name in modules and name not in reqs_module_name:
if name in self._force_modules_reqs:
reqs.add(name, self._force_modules_reqs[name], modules[name])
reqs_module_name.append(name)
continue
# if this is a base module, we have it in installed modules but package name is None
mod_path = os.path.realpath(self._local_mods[name])
if is_base_module(mod_path):
continue
# if this is a folder of our project, we can safely ignore it
if (six.PY3 and os.path.commonpath([project_path]) == os.path.commonpath([project_path, mod_path])) or (
six.PY2 and os.path.commonprefix([project_path]) == os.path.commonprefix([project_path, mod_path])
):
continue
relpath = os.path.relpath(self._local_mods[name], self._project_path)
if not relpath.startswith("."):
relpath = "." + os.path.sep + relpath
local.add(name, relpath, modules[name])
return reqs, try_imports, guess, local
@classmethod
def get_forced_modules(cls) -> dict:
return cls._force_modules_reqs
@classmethod
def add_forced_module(cls, module_name: str, module_version: str) -> None:
cls._force_modules_reqs[module_name] = module_version
def _write_reqs(self, reqs: ReqsModules) -> None:
print('Writing requirements to "{0}"'.format(self._save_path))
with open(self._save_path, "w+") as f:
f.write("# Requirements automatically generated by pigar.\n# https://github.com/damnever/pigar\n")
for k, v in reqs.sorted_items():
f.write("\n")
f.write("".join(["# {0}\n".format(c) for c in v.comments.sorted_items()]))
if k == "-e":
f.write("{0} {1}\n".format(k, v.version))
elif v:
f.write("{0} {1} {2}\n".format(k, self._comparison_operator, v.version))
else:
f.write("{0}\n".format(k))
def _best_matchs(self, name: str, pkgs: List[str]) -> List[str]:
# If imported name equals to package name.
if name in pkgs:
return [pkgs[pkgs.index(name)]]
# If not, return all possible packages.
return pkgs
def _filter_modules(self, modules: set, local_mods: set) -> set:
candidates = set()
logger.info("Filtering modules ...")
for module in modules:
logger.info("Checking module: %s", module)
if not module:
continue
if module.startswith("."):
self._relative_imports.add(module)
continue
if module in local_mods:
self._maybe_local_mods.add(module)
module_std_local = is_std_or_local_lib(module)
if module_std_local is True:
continue
if isinstance(module_std_local, str):
self._local_mods[module] = module_std_local
continue
candidates.add(module)
return candidates
def _invalid_reqs(self, reqs: ReqsModules) -> None:
for name, detail in reqs.sorted_items():
print(" {0} referenced from:\n {1}".format(name, "\n ".join(detail.comments.sorted_items())))
def _save_old_reqs(self) -> None:
if os.path.isfile(self._save_path):
with codecs.open(self._save_path, "rb", "utf-8") as f:
self._old_reqs = f.readlines()
def _reqs_diff(self) -> None:
if not hasattr(self, "_old_reqs"):
return
with codecs.open(self._save_path, "rb", "utf-8") as f:
new_reqs = f.readlines()
is_diff, diffs = lines_diff(self._old_reqs, new_reqs)
msg = "Requirements file has been covered, "
if is_diff:
msg += "there is the difference:"
print("{0}\n{1}".format(msg, "".join(diffs)), end="")
else:
msg += "no difference."
print(msg)
| GenerateReqs |
python | davidhalter__jedi | test/completion/dynamic_arrays.py | {
"start": 2876,
"end": 5498
} | class ____():
def blub(self, b):
if 1:
a = []
a.append(b)
return a
def blub2(self):
""" mapper function """
a = self.blub(1.0)
#? float()
a[0]
return a
def literal_arr(self, el):
self.a = []
self.a.append(el)
#? int()
self.a[0]
return self.a
def list_arr(self, el):
self.b = list([])
self.b.append(el)
#? float()
self.b[0]
return self.b
#? int()
C().blub(1)[0]
#? float()
C().blub2(1)[0]
#? int()
C().a[0]
#? int()
C().literal_arr(1)[0]
#? float()
C().b[0]
#? float()
C().list_arr(1.0)[0]
# -----------------
# array recursions
# -----------------
a = set([1.0])
a.update(a)
a.update([1])
#? float() int()
list(a)[0]
def first(a):
b = []
b.append(a)
b.extend(second(a))
return list(b)
def second(a):
b = []
b.extend(first(a))
return list(b)
#? float()
first(1.0)[0]
def third():
b = []
b.extend
extend()
b.extend(first())
return list(b)
#?
third()[0]
# -----------------
# set.add
# -----------------
st = {1.0}
for a in [1,2]:
st.add(a)
st.append('') # lists should not have an influence
st.add # should not cause an exception
st.add()
st = {1.0}
st.add(1)
lst = list(st)
lst.append('')
#? float() int() str()
lst[0]
# -----------------
# list setitem
# -----------------
some_lst = [int]
some_lst[3] = str
#? int
some_lst[0]
#? str
some_lst[3]
#? int str
some_lst[2]
some_lst[0] = tuple
#? tuple
some_lst[0]
#? int str tuple
some_lst[1]
some_lst2 = list([1])
some_lst2[3] = ''
#? int() str()
some_lst2[0]
#? str()
some_lst2[3]
#? int() str()
some_lst2[2]
some_lst3 = []
some_lst3[0] = 3
some_lst3[:] = '' # Is ignored for now.
#? int()
some_lst3[0]
# -----------------
# set setitem/other modifications (should not work)
# -----------------
some_set = {int}
some_set[3] = str
#? int
some_set[0]
#? int
some_set[3]
something = object()
something[3] = str
#?
something[0]
#?
something[3]
# -----------------
# dict setitem
# -----------------
some_dct = {'a': float, 1: int}
some_dct['x'] = list
some_dct['y'] = tuple
#? list
some_dct['x']
#? int float list tuple
some_dct['unknown']
#? float
some_dct['a']
some_dct = dict({'a': 1, 1: ''})
#? int() str()
some_dct['la']
#? int()
some_dct['a']
some_dct['x'] = list
some_dct['y'] = tuple
#? list
some_dct['x']
#? int() str() list tuple
some_dct['unknown']
k = 'a'
#? int()
some_dct[k]
some_other_dct = dict(some_dct, c=set)
#? int()
some_other_dct['a']
#? list
some_other_dct['x']
#? set
some_other_dct['c']
| C |
python | doocs__leetcode | solution/1800-1899/1876.Substrings of Size Three with Distinct Characters/Solution.py | {
"start": 0,
"end": 362
} | class ____:
def countGoodSubstrings(self, s: str) -> int:
ans = mask = l = 0
for r, x in enumerate(map(lambda c: ord(c) - 97, s)):
while mask >> x & 1:
y = ord(s[l]) - 97
mask ^= 1 << y
l += 1
mask |= 1 << x
ans += int(r - l + 1 >= 3)
return ans
| Solution |
python | ansible__ansible | lib/ansible/cli/vault.py | {
"start": 848,
"end": 23195
} | class ____(CLI):
""" can encrypt any structured data file used by Ansible.
This can include *group_vars/* or *host_vars/* inventory variables,
variables loaded by *include_vars* or *vars_files*, or variable files
passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
Role variables and defaults are also included!
Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
"""
name = 'ansible-vault'
FROM_STDIN = "stdin"
FROM_ARGS = "the command line args"
FROM_PROMPT = "the interactive prompt"
def __init__(self, args):
self.b_vault_pass = None
self.b_new_vault_pass = None
self.encrypt_string_read_stdin = False
self.encrypt_secret = None
self.encrypt_vault_id = None
self.new_encrypt_secret = None
self.new_encrypt_vault_id = None
super(VaultCLI, self).__init__(args)
def init_parser(self):
super(VaultCLI, self).init_parser(
desc="encryption/decryption utility for Ansible data files",
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
common = opt_help.ArgumentParser(add_help=False)
opt_help.add_vault_options(common)
opt_help.add_verbosity_options(common)
subparsers = self.parser.add_subparsers(dest='action')
subparsers.required = True
output = opt_help.ArgumentParser(add_help=False)
output.add_argument('--output', default=None, dest='output_file',
help='output file name for encrypt or decrypt; use - for stdout',
type=opt_help.unfrack_path())
# For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
vault_id = opt_help.ArgumentParser(add_help=False)
vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
action='store', type=str,
help='the vault id used to encrypt (required if more than one vault-id is provided)')
create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
create_parser.set_defaults(func=self.execute_create)
create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
create_parser.add_argument('--skip-tty-check', default=False, help='allows editor to be opened when no tty attached',
dest='skip_tty_check', action='store_true')
decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file or string', parents=[output, common])
decrypt_parser.set_defaults(func=self.execute_decrypt)
decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
edit_parser.set_defaults(func=self.execute_edit)
edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
view_parser.set_defaults(func=self.execute_view)
view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
encrypt_parser.set_defaults(func=self.execute_encrypt)
encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
enc_str_parser.set_defaults(func=self.execute_encrypt_string)
enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
help='Do not hide input when prompted for the string to encrypt')
enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
default=None,
help="Specify the variable name for stdin")
rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
rekey_parser.set_defaults(func=self.execute_rekey)
rekey_new_group = rekey_parser.add_mutually_exclusive_group()
rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", type=opt_help.unfrack_path())
rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
help='the new vault identity to use for rekey')
rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
def post_process_args(self, options):
options = super(VaultCLI, self).post_process_args(options)
display.verbosity = options.verbosity
if options.vault_ids:
for vault_id in options.vault_ids:
if u';' in vault_id:
raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
if getattr(options, 'output_file', None) and len(options.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if options.action == 'encrypt_string':
if '-' in options.args or options.encrypt_string_stdin_name or (not options.args and not options.encrypt_string_prompt):
# prompting from stdin and reading from stdin are mutually exclusive, if stdin is still provided, it is ignored
self.encrypt_string_read_stdin = True
if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
# should only trigger if prompt + either - or encrypt string stdin name were provided
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
return options
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
vault_ids = list(context.CLIARGS['vault_ids'])
# there are 3 types of actions, those that just 'read' (decrypt, view) and only
# need to ask for a password once, and those that 'write' (create, encrypt) that
# ask for a new password and confirm it, and 'read/write (rekey) that asks for the
# old password, then asks for a new one and confirms it.
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
action = context.CLIARGS['action']
# TODO: instead of prompting for these before, we could let VaultEditor
# call a callback when it needs it.
if action in ['decrypt', 'view', 'rekey', 'edit']:
vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'])
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
if action in ['encrypt', 'encrypt_string', 'create']:
encrypt_vault_id = None
# no --encrypt-vault-id context.CLIARGS['encrypt_vault_id'] for 'edit'
if action not in ['edit']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
vault_secrets = None
vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if len(vault_secrets) > 1 and not encrypt_vault_id:
raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
','.join([x[0] for x in vault_secrets]))
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
encrypt_secret = match_encrypt_secret(vault_secrets,
encrypt_vault_id=encrypt_vault_id)
# only one secret for encrypt for now, use the first vault_id and use its first secret
# TODO: exception if more than one?
self.encrypt_vault_id = encrypt_secret[0]
self.encrypt_secret = encrypt_secret[1]
if action in ['rekey']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
# print('encrypt_vault_id: %s' % encrypt_vault_id)
# print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
# new_vault_ids should only ever be one item, from
# load the default vault ids if we are using encrypt-vault-id
new_vault_ids = []
if encrypt_vault_id:
new_vault_ids = default_vault_ids
if context.CLIARGS['new_vault_id']:
new_vault_ids.append(context.CLIARGS['new_vault_id'])
new_vault_password_files = []
if context.CLIARGS['new_vault_password_file']:
new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
new_vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=new_vault_ids,
vault_password_files=new_vault_password_files,
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
initialize_context=False,
create_new_password=True)
if not new_vault_secrets:
raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
# There is only one new_vault_id currently and one new_vault_secret, or we
# use the id specified in --encrypt-vault-id
new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
encrypt_vault_id=encrypt_vault_id)
self.new_encrypt_vault_id = new_encrypt_secret[0]
self.new_encrypt_secret = new_encrypt_secret[1]
loader.set_vault_secrets(vault_secrets)
# FIXME: do we need to create VaultEditor here? its not reused
vault = VaultLib(vault_secrets)
self.editor = VaultEditor(vault)
context.CLIARGS['func']()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
""" encrypt the supplied file using the provided vault secret """
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
# FIXME: use the correct vau
self.editor.encrypt_file(f, self.encrypt_secret,
vault_id=self.encrypt_vault_id,
output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
@staticmethod
def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
indent = indent or 10
block_format_var_name = ""
if name:
block_format_var_name = "%s: " % name
block_format_header = "%s!vault |" % block_format_var_name
lines = []
vault_ciphertext = to_text(b_ciphertext)
lines.append(block_format_header)
for line in vault_ciphertext.splitlines():
lines.append('%s%s' % (' ' * indent, line))
yaml_ciphertext = '\n'.join(lines)
return yaml_ciphertext
def execute_encrypt_string(self):
""" encrypt the supplied string using the provided vault secret """
b_plaintext = None
# Holds tuples (the_text, the_source_of_the_string, the variable name if its provided).
b_plaintext_list = []
# remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
# we don't add it to the plaintext list
args = [x for x in context.CLIARGS['args'] if x != '-']
# We can prompt and read input, or read from stdin, but not both.
if context.CLIARGS['encrypt_string_prompt']:
msg = "String to encrypt: "
name = None
name_prompt_response = display.prompt('Variable name (enter for no name): ')
# TODO: enforce var naming rules?
if name_prompt_response != "":
name = name_prompt_response
# TODO: could prompt for which vault_id to use for each plaintext string
# currently, it will just be the default
hide_input = not context.CLIARGS['show_string_input']
if hide_input:
msg = "String to encrypt (hidden): "
else:
msg = "String to encrypt:"
prompt_response = display.prompt(msg, private=hide_input)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
b_plaintext = to_bytes(prompt_response)
b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
# read from stdin
if self.encrypt_string_read_stdin:
if sys.stdout.isatty():
display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
stdin_text = sys.stdin.read()
if stdin_text == '':
raise AnsibleOptionsError('stdin was empty, not encrypting')
if sys.stdout.isatty() and not stdin_text.endswith("\n"):
display.display("\n")
b_plaintext = to_bytes(stdin_text)
# defaults to None
name = context.CLIARGS['encrypt_string_stdin_name']
b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
# use any leftover args as strings to encrypt
# Try to match args up to --name options
if context.CLIARGS.get('encrypt_string_names', False):
name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
# Some but not enough --name's to name each var
if len(args) > len(name_and_text_list):
# Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
display.display('The number of --name options do not match the number of args.',
stderr=True)
display.display('The last named variable will be "%s". The rest will not have'
' names.' % context.CLIARGS['encrypt_string_names'][-1],
stderr=True)
# Add the rest of the args without specifying a name
for extra_arg in args[len(name_and_text_list):]:
name_and_text_list.append((None, extra_arg))
# if no --names are provided, just use the args without a name.
else:
name_and_text_list = [(None, x) for x in args]
# Convert the plaintext text objects to bytestrings and collect
for name_and_text in name_and_text_list:
name, plaintext = name_and_text
if plaintext == '':
raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
b_plaintext = to_bytes(plaintext)
b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
# TODO: specify vault_id per string?
# Format the encrypted strings and any corresponding stderr output
outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
b_outs = []
for output in outputs:
err = output.get('err', None)
out = output.get('out', '')
if err:
sys.stderr.write(err)
b_outs.append(to_bytes(out))
# The output must end with a newline to play nice with terminal representation.
# Refs:
# * https://stackoverflow.com/a/729795/595220
# * https://github.com/ansible/ansible/issues/78932
b_outs.append(b'')
self.editor.write_data(b'\n'.join(b_outs), context.CLIARGS['output_file'] or '-')
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
# TODO: offer block or string ala eyaml
def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
# If we are only showing one item in the output, we don't need to included commented
# delimiters in the text
show_delimiter = False
if len(b_plaintext_list) > 1:
show_delimiter = True
# list of dicts {'out': '', 'err': ''}
output = []
# Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
# For more than one input, show some differentiating info in the stderr output so we can tell them
# apart. If we have a var name, we include that in the yaml
for index, b_plaintext_info in enumerate(b_plaintext_list):
# (the text itself, which input it came from, its name)
b_plaintext, src, name = b_plaintext_info
b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret, vault_id=vault_id)
# block formatting
yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
err_msg = None
if show_delimiter:
human_index = index + 1
if name:
err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
else:
err_msg = '# The encrypted version of the string #%d from %s.)\n' % (human_index, src)
output.append({'out': yaml_text, 'err': err_msg})
return output
def execute_decrypt(self):
""" decrypt the supplied file using the provided vault secret """
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
""" create and open a file in an editor that will be encrypted with the provided vault secret when closed"""
if len(context.CLIARGS['args']) != 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
if sys.stdout.isatty() or context.CLIARGS['skip_tty_check']:
self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
vault_id=self.encrypt_vault_id)
else:
raise AnsibleOptionsError("not a tty, editor cannot be opened")
def execute_edit(self):
""" open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed"""
for f in context.CLIARGS['args']:
self.editor.edit_file(f)
def execute_view(self):
""" open, decrypt and view an existing vaulted file using a pager using the supplied vault secret """
for f in context.CLIARGS['args']:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
plaintext = self.editor.plaintext(f)
self.pager(to_text(plaintext))
def execute_rekey(self):
""" re-encrypt a vaulted file with a new secret, the previous secret is required """
for f in context.CLIARGS['args']:
# FIXME: plumb in vault_id, use the default new_vault_secret for now
self.editor.rekey_file(f, self.new_encrypt_secret,
self.new_encrypt_vault_id)
display.display("Rekey successful", stderr=True)
def main(args=None):
VaultCLI.cli_executor(args)
if __name__ == '__main__':
main()
| VaultCLI |
python | huggingface__transformers | tests/models/musicgen_melody/test_processing_musicgen_melody.py | {
"start": 1762,
"end": 6484
} | class ____(unittest.TestCase):
def setUp(self):
# Ignore copy
self.checkpoint = "facebook/musicgen-melody"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return T5Tokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return MusicgenMelodyFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = MusicgenMelodyProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, T5TokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, MusicgenMelodyFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = MusicgenMelodyProcessor(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = MusicgenMelodyProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, T5TokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, MusicgenMelodyFeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(sequences=predicted_ids)
decoded_tok = tokenizer.decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
# Ignore copy
def test_decode_audio(self):
feature_extractor = self.get_feature_extractor(padding_side="left")
tokenizer = self.get_tokenizer()
processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
attention_mask = np.zeros((3, 20))
attention_mask[0, -5:] = 1
attention_mask[1, -20:] = 1
attention_mask[2, -10:] = 1
generated_speech = np.asarray(floats_list((3, 20)))[:, None, :]
decoded_audios = processor.batch_decode(generated_speech, attention_mask=attention_mask)
self.assertIsInstance(decoded_audios, list)
for audio in decoded_audios:
self.assertIsInstance(audio, np.ndarray)
self.assertTrue(decoded_audios[0].shape == (1, 5))
self.assertTrue(decoded_audios[1].shape == (1, 20))
self.assertTrue(decoded_audios[2].shape == (1, 10))
| MusicgenMelodyProcessorTest |
python | pytorch__pytorch | torch/utils/_python_dispatch.py | {
"start": 15117,
"end": 25526
} | class ____(Protocol):
def __tensor_flatten__(self) -> tuple[Sequence[str], object]: ...
@staticmethod
def __tensor_unflatten__(
inner_tensors: int, flatten_spec: int, outer_size: int, outer_stride: int
) -> torch.Tensor: ...
# It would be really nice to be able to say that the return of
# is_traceable_wrapper_subclass() is Intersection[torch.Tensor,
# TensorWithFlatten] - but that doesn't exist.
shape: torch._C.Size
@overload
def stride(self, dim: None = None) -> tuple[int, ...]: ...
@overload
def stride(self, dim: int) -> int: ...
@overload
def size(self, dim: None = None) -> tuple[int, ...]: ...
@overload
def size(self, dim: int) -> int: ...
def storage_offset(self) -> int: ...
def dim(self) -> int: ...
@overload
def to(
self,
dtype: torch.types._dtype,
non_blocking: bool = False,
copy: bool = False,
*,
memory_format: torch.memory_format | None = None,
) -> torch.Tensor: ...
@overload
def to(
self,
device: torch._prims_common.DeviceLikeType | None = None,
dtype: torch.types._dtype | None = None,
non_blocking: bool = False,
copy: bool = False,
*,
memory_format: torch.memory_format | None = None,
) -> torch.Tensor: ...
@overload
def to(
self,
other: torch.Tensor,
non_blocking: bool = False,
copy: bool = False,
*,
memory_format: torch.memory_format | None = None,
) -> torch.Tensor: ...
def is_traceable_wrapper_subclass(t: object) -> TypeIs[TensorWithFlatten]:
"""
Returns whether or not a tensor subclass that implements __torch_dispatch__
is 'traceable' with torch.compile.
In order for a tensor subclass to support TorchDispatchMode-style tracing in PT2,
It must implement two magic methods: __tensor_flatten__ and __tensor_unflatten__.
It is also expected to obey some restrictions around traceability and aliasing:
* The subclass's __torch_dispatch__() implementation should desugar into pytorch
dispatcher operations that can be traced into a graph.
* The subclass should use return_and_correct_aliasing(). This is needed today to make
sure that torch.compile does the right thing in a few cases around input mutation
and output aliasing.
Expected magic method signatures:
attrs, ctx = t.__tensor_flatten__()
attrs: list of attribute name strings for inner tensors
ctx: dict containing any other subclass-specific metadata needed for unflattening
t = MySubClass.__tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride)
inner_tensors: dict mapping attribute name -> tensor for each inner tensor
ctx: dict with subclass metadata in the form that __tensor_flatten__() produces
outer_size: expected (possibly symbolic) size that the returned subclass
instance should have. Note that this arg is useful for certain subclasses
that require the shape info to be constructed. In most cases, this arg can be
safely ignored.
outer_stride: expected (possibly symbolic) stride that the returned subclass
instance should have. Note that this arg is useful for certain subclasses
that require the stride info to be constructed. In most cases, this arg can be
safely ignored.
"""
is_subclass = isinstance(t, torch.Tensor) and type(t) is not torch.Tensor
return (
is_subclass
and hasattr(t, "__tensor_flatten__")
and hasattr(t, "__tensor_unflatten__")
)
def is_traceable_wrapper_subclass_type(t: type) -> TypeIs[type[TensorWithFlatten]]:
"""Same as above, but takes a type argument instead of an instance."""
return (
issubclass(t, torch.Tensor)
and t is not torch.Tensor
and hasattr(t, "__tensor_flatten__")
and hasattr(t, "__tensor_unflatten__")
)
def transform_subclass(t, callback, outer_size=None, outer_stride=None):
"""
Given a traceable, wrapper tensor subclass ``t`` that implements
``__torch_dispatch__`` and holds some inner tensors,
and a callback of type ``Callable[[str, torch.Tensor], torch.Tensor]``,
`transform_subclass` will construct a fresh instance of the wrapper tensor subclass.
It will do so by grabbing each inner tensor attribute from the wrapper,
passing them into ``callback`` to get a transformed tensor,
and putting each transformed tensor into the fresh tensor subclass instance.
Note: this function will not handle ensuring that the fresh subclass
gets the same (autograd, and aliasing) metadata as the original tensor.
This is generally handled in other subsystems like AOTAutograd.
"""
outer_size = outer_size if outer_size is not None else t.size()
outer_stride = outer_stride if outer_stride is not None else t.stride()
attrs, ctx = t.__tensor_flatten__()
transformed_tensors_dict = {}
for attr in attrs:
transformed_tensors_dict[attr] = callback(attr, getattr(t, attr))
sub = type(t).__tensor_unflatten__(
transformed_tensors_dict, ctx, outer_size, outer_stride
)
# NB: Purposefully guard here to simplify the inner / outer symbols.
# Using sym_eq() for symbolic comparison can result in an expression that's too
# difficult to guard on, so we use == here.
if sub.shape != outer_size:
raise AssertionError(
f"Expected return value from {type(t)}__tensor_unflatten__() to have "
f"shape equal to {outer_size}, but got: {sub.shape}"
)
if sub.stride() != outer_stride:
raise AssertionError(
f"Expected return value from {type(t)}__tensor_unflatten__() to have "
f"stride equal to {outer_stride}, but got: {sub.stride()}"
)
return sub
def _correct_storage_aliasing(func, schema_info, args, outs) -> None:
"""
Given: an OpOverload, a SchemaInfo (cached information from torchgen about schema),
and the inputs/outputs to the OpOverload,
this function checks to see if func is a view operator
(by checking if any of the outputs in the op's schema
are immutable aliases of inputs).
If so, this function manually aliases the storage of the output tensor
with its corresponding input tensor alias.
It does this by unsafely overwriting the storage field of the output tensor
to be the same storage as the input.
"""
if not isinstance(func, torch._ops.OpOverload):
raise AssertionError(f"func must be an OpOverload, got {type(args)}")
if not isinstance(args, tuple):
raise AssertionError(f"args must be a tuple, got {type(args)}")
if not isinstance(outs, (list, tuple)):
raise AssertionError(f"outs must be a list or tuple, got {type(args)}")
def alias_non_inplace_storage(arg, ret) -> None:
# This is hopefully a reasonable assert:
# subclasses that rely on this API for output aliasing
# should always return wrapper tensor subclasses for us to manually alias.
# in theory if a subclass that needs this API wants to sometimes return
# plain tensors, we could remove the assert and just not perform the aliasing,
# but it seems safer to learn more about this case first.
#
# Performance note: This is all just to assert that the argument and result
# types match, checking that is cheaper than is_traceable_wrapper_subclass_type,
# and multiple returns are relatively unlikely, so just check up front!
arg_type = type(arg)
ret_type = type(ret)
if arg_type is not ret_type and (
is_traceable_wrapper_subclass_type(arg_type)
or is_traceable_wrapper_subclass_type(ret_type)
):
ret_list = ret if isinstance(ret, list) else [ret]
for r in ret_list:
if type(arg) is not type(r):
raise AssertionError(
f"Called {str(func)} with input of type {type(arg)}\n"
f"and output of type {type(ret)}. But expected types to match."
)
# Need to call a non-dispatcher helper, because we explicitly do **not**
# want our subclass to intercept the set_() call.
# instead, our subclass should directly have its storage swapped out.
# we **explicitly** don't want to reset the sizes on ret, if the storage implies a size change.
# Why?
# The purpose of this API is *not* to change the size/strides of our output- we assume it's already correct.
# We just want to "fix up" the storage aliasing, without modifying or output's metadata.
# Example: out = inp.expand(inp.shape[0], inp.shape[0])
# This requires swapping the storage of out to be the same as inp,
# but we do *not* want it to change the sizes/strides that were compute for out.
if isinstance(ret, list):
for r in ret:
torch._functionalize_unsafe_set(r, arg)
else:
if not isinstance(ret, torch.Tensor):
raise AssertionError(f"expected torch.Tensor, got {type(ret)}")
torch._functionalize_unsafe_set(ret, arg)
for arg_idx, return_idx in schema_info.read_only_alias_match_indexes:
alias_non_inplace_storage(args[arg_idx], outs[return_idx])
def _get_write_alias(x) -> str | None:
alias_set = x.alias_set
if not alias_set or not x.is_write:
return None
# torchscript allows for complicated alias sets, but our dispatcher ops only really involve simple aliasing
if len(alias_set) != 1:
raise AssertionError("Expected alias_set to contain exactly one element")
# timeit says next(iter(alias_set)) is faster than list(alias_set)[0] even for
# set of size 1 on Python 3.13.
return next(iter(alias_set))
# This abstracts over the fact that in return_and_correct_aliasing,
# we sometimes use torchgen schema parsing (for aten ops, since torchscript's schema parsing is sometimes buggy),
# and sometimes use torchscript schema parsing (for custom ops, for which torchgen parsing is untested).
@dataclass
| TensorWithFlatten |
python | huggingface__transformers | src/transformers/models/data2vec/modular_data2vec_audio.py | {
"start": 8848,
"end": 8940
} | class ____(Wav2Vec2ForSequenceClassification):
pass
| Data2VecAudioForSequenceClassification |
python | python__mypy | mypyc/irbuild/classdef.py | {
"start": 10330,
"end": 12005
} | class ____(ClassBuilder):
def __init__(self, builder: IRBuilder, cdef: ClassDef) -> None:
super().__init__(builder, cdef)
# If the class is not decorated, generate an extension class for it.
self.type_obj: Value | None = allocate_class(builder, cdef)
def skip_attr_default(self, name: str, stmt: AssignmentStmt) -> bool:
"""Controls whether to skip generating a default for an attribute."""
return False
def add_method(self, fdef: FuncDef) -> None:
handle_ext_method(self.builder, self.cdef, fdef)
def add_attr(self, lvalue: NameExpr, stmt: AssignmentStmt) -> None:
# Variable declaration with no body
if isinstance(stmt.rvalue, TempNode):
return
# Only treat marked class variables as class variables.
if not (is_class_var(lvalue) or stmt.is_final_def):
return
typ = self.builder.load_native_type_object(self.cdef.fullname)
value = self.builder.accept(stmt.rvalue)
self.builder.primitive_op(
py_setattr_op, [typ, self.builder.load_str(lvalue.name), value], stmt.line
)
if self.builder.non_function_scope() and stmt.is_final_def:
self.builder.init_final_static(lvalue, value, self.cdef.name)
def finalize(self, ir: ClassIR) -> None:
attrs_with_defaults, default_assignments = find_attr_initializers(
self.builder, self.cdef, self.skip_attr_default
)
ir.attrs_with_defaults.update(attrs_with_defaults)
generate_attr_defaults_init(self.builder, self.cdef, default_assignments)
create_ne_from_eq(self.builder, self.cdef)
| ExtClassBuilder |
python | huggingface__transformers | examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py | {
"start": 4763,
"end": 10353
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
dataset_name: str = field(
metadata={"help": "Path or name of the dataset (cf `load_dataset` method of the Datasets library)."}
)
target_language: str = field(
metadata={
"help": (
"The target language on which the adapter attention layers"
" should be trained on in ISO 693-3 code, e.g. `tur` for Turkish"
" Wav2Vec2's MMS ISO codes can be looked up here: https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html"
" If you are not training the adapter layers on a language, simply choose"
" another acronym that fits your data."
)
},
)
dataset_config_name: str = field(
default=None,
metadata={
"help": "The configuration name of the dataset to use (cf `load_dataset` method of the Datasets library)."
},
)
train_split_name: str = field(
default="train+validation",
metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to "
"'train+validation'"
)
},
)
eval_split_name: str = field(
default="test",
metadata={
"help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
text_column_name: str = field(
default="text",
metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached preprocessed datasets or not."},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
},
)
chars_to_ignore: Optional[list[str]] = list_field(
default=None,
metadata={"help": "A list of characters to remove from the transcripts."},
)
eval_metrics: list[str] = list_field(
default=["wer"],
metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={
"help": (
"Filter audio files that are longer than `max_duration_in_seconds` seconds to"
" 'max_duration_in_seconds`"
)
},
)
min_duration_in_seconds: float = field(
default=0.0,
metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"},
)
preprocessing_only: bool = field(
default=False,
metadata={
"help": (
"Whether to only do data preprocessing and skip training. This is especially useful when data"
" preprocessing errors out in distributed training due to timeout. In this case, one should run the"
" preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
" can consequently be loaded in distributed training"
)
},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
unk_token: str = field(
default="[UNK]",
metadata={"help": "The unk token for the tokenizer"},
)
pad_token: str = field(
default="[PAD]",
metadata={"help": "The padding token for the tokenizer"},
)
word_delimiter_token: str = field(
default="|",
metadata={"help": "The word delimiter token for the tokenizer"},
)
overwrite_lang_vocab: bool = field(
default=False,
metadata={"help": ("If :obj:`True`, will overwrite existing `target_language` vocabulary of tokenizer.")},
)
@dataclass
| DataTrainingArguments |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 9632,
"end": 9735
} | class ____(OracleToleranceMixin, GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
| Difference |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 522638,
"end": 525076
} | class ____(Request):
"""
Unarchive tasks
:param ids: IDs of the tasks to unarchive
:type ids: Sequence[str]
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "unarchive_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "IDs of the tasks to unarchive",
"items": {"type": "string"},
"type": "array",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids, status_reason=None, status_message=None, **kwargs):
super(UnarchiveManyRequest, self).__init__(**kwargs)
self.ids = ids
self.status_reason = status_reason
self.status_message = status_message
@schema_property("ids")
def ids(self):
return self._property_ids
@ids.setter
def ids(self, value):
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| UnarchiveManyRequest |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py | {
"start": 3025,
"end": 24702
} | class ____(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
"""Verifies that the ClusterSpec generates the correct proto.
We are testing this four different ways to ensure that the ClusterSpec
returned by the TPUClusterResolver behaves identically to a normal
ClusterSpec when passed into the generic ClusterSpec libraries.
Args:
cluster_spec: ClusterSpec returned by the TPUClusterResolver
expected_proto: Expected protobuf
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def mock_service_client(self, tpu_map=None):
if tpu_map is None:
tpu_map = {}
mock_locations = mock.MagicMock()
mock_locations.nodes.return_value = MockNodeClass(tpu_map)
mock_project = mock.MagicMock()
mock_project.locations.return_value = mock_locations
mock_client = mock.MagicMock()
mock_client.projects.return_value = mock_project
return mock_client
@mock.patch.object(resolver, 'is_running_in_gce', mock_is_running_in_gce)
def testCheckRunningInGceWithNoTpuName(self):
with self.assertRaisesRegex(ValueError,
'Please provide a TPU Name to connect to.*'):
resolver.TPUClusterResolver(tpu='')
@mock.patch.object(six.moves.urllib.request, 'urlopen',
mock_running_in_gce_urlopen)
def testIsRunningInGce(self):
self.assertTrue(resolver.is_running_in_gce())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRetrieveProjectAndZoneFromMetadata(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project=None,
zone=None,
tpu=['test-tpu-1'],
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map),
coordinator_name='coordinator')
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'coordinator'
tasks { key: 0 value: '10.128.1.2:%s' }
}
job {
name: 'worker'
tasks { key: 0 value: '10.1.2.3:8470' }
}
""" % cluster_resolver._coordinator_port
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRetrieveProjectAndZoneFromMetadataNoCoordinator(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project=None,
zone=None,
tpu=['test-tpu-1'],
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testNotReadyCloudTpu(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'CREATING'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project=None,
zone=None,
tpu='test-tpu-1',
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
with self.assertRaises(RuntimeError):
cluster_resolver.cluster_spec()
def testSimpleSuccessfulRetrieval(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu=['test-tpu-1'],
coordinator_name='coordinator',
coordinator_address='10.128.1.5:10203',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'coordinator' tasks { key: 0 value: '10.128.1.5:10203' } }
job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
def testFailedMetadata(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'ipAddress': '10.1.2.3',
'port': '8470',
'health': 'HEALTHY'
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='nonexistent-tpu',
coordinator_name='coordinator',
coordinator_address='10.128.1.5:10203',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
with self.assertRaises(ValueError) as error_context:
cluster_resolver.cluster_spec()
self.assertIn('Could not lookup TPU metadata', str(error_context.exception))
def testNewNetworkEndpointFormat(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health': 'HEALTHY',
'networkEndpoints': [{
'ipAddress': '10.2.3.4',
'port': 8470,
}]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
coordinator_name='coordinator',
coordinator_address='10.128.1.5:10203',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'coordinator' tasks { key: 0 value: '10.128.1.5:10203' } }
job { name: 'worker' tasks { key: 0 value: '10.2.3.4:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual('grpc://10.2.3.4:8470', cluster_resolver.master())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testPodResolution(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
tpu='test-tpu-1',
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map),
coordinator_name='coordinator')
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'coordinator',
tasks { key: 0 value: '10.128.1.2:%s'}
}
job {
name: 'worker'
tasks { key: 0 value: '10.2.3.4:8470' }
tasks { key: 1 value: '10.2.3.5:8470' }
tasks { key: 2 value: '10.2.3.6:8470' }
tasks { key: 3 value: '10.2.3.7:8470' }
}
""" % cluster_resolver._coordinator_port
self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.4:8470')
def testPodResolutionNoCoordinator(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.2.3.4:8470' }
tasks { key: 1 value: '10.2.3.5:8470' }
tasks { key: 2 value: '10.2.3.6:8470' }
tasks { key: 3 value: '10.2.3.7:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.4:8470')
def testGetMasterNoEntries(self):
tpu_map = {}
with self.assertRaises(ValueError):
resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu=[],
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
# TODO(saeta): Convert to parameterized test when included in OSS TF.
def verifyShouldResolve(self, tpu, should_resolve):
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu=tpu,
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map={}))
self.assertEqual(should_resolve,
cluster_resolver._cloud_tpu_client.api_available(),
"TPU: '%s'" % tpu)
def testShouldResolveGrpc(self):
self.verifyShouldResolve('grpc://10.1.2.3:8470', False)
def testShouldResolveName(self):
self.verifyShouldResolve('mytpu', True)
def testShouldResolveList(self):
self.verifyShouldResolve(['myothertpu'], True)
def testShouldResolveGrpcPrefix(self):
self.verifyShouldResolve('grpctpu', True)
def testNoCallComputeMetadata(self):
cluster_resolver = resolver.TPUClusterResolver(tpu='grpc://10.1.2.3:8470')
self.assertEqual('grpc://10.1.2.3:8470', cluster_resolver.master())
self.assertEqual(
server_lib.ClusterSpec({
'worker': ['10.1.2.3:8470']
}).as_dict(),
cluster_resolver.cluster_spec().as_dict())
def testGkeEnvironmentForDonut(self):
os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'] = 'grpc://10.120.27.5:8470'
self.assertIn('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', os.environ)
cluster_resolver = resolver.TPUClusterResolver()
self.assertEqual(
compat.as_bytes('grpc://10.120.27.5:8470'),
compat.as_bytes(cluster_resolver.master()))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.120.27.5:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
del os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
def testGkeEnvironmentForPod(self):
os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'] = ('grpc://10.120.27.5:8470,'
'grpc://10.120.27.6:8470,'
'grpc://10.120.27.7:8470,'
'grpc://10.120.27.8:8470')
self.assertIn('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', os.environ)
cluster_resolver = resolver.TPUClusterResolver()
self.assertEqual(
compat.as_bytes('grpc://10.120.27.5:8470'),
compat.as_bytes(cluster_resolver.master()))
actual_cluster_spec = cluster_resolver.cluster_spec()
expected_proto = """
job {
name: 'worker'
tasks { key: 0 value: '10.120.27.5:8470' }
tasks { key: 1 value: '10.120.27.6:8470' }
tasks { key: 2 value: '10.120.27.7:8470' }
tasks { key: 3 value: '10.120.27.8:8470' }
}
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
del os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
def testRpcDetectionForGrpcString(self):
cluster_resolver = resolver.TPUClusterResolver(
tpu='grpc://10.1.2.3:8470')
self.assertEqual(cluster_resolver.master(), 'grpc://10.1.2.3:8470')
def testOverrideTaskTypeAndIndexAndGetMaster(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
coordinator_name=None,
credentials=None,
service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.4:8470')
cluster_resolver.task_type = 'worker'
cluster_resolver.task_id = 3
self.assertEqual(cluster_resolver.master(), 'grpc://10.2.3.7:8470')
def testGetDeviceDictAndCoresWithTPUs(self):
devices = [
'/job:tpu_worker/task:0/device:TPU:0',
'/job:tpu_worker/task:1/device:TPU:1',
'/job:tpu_worker/task:2/device:TPU:0',
'/job:tpu_worker/task:3/device:TPU:1',
'/job:tpu_worker/task:0/device:TPU:4',
'/job:tpu_worker/task:1/device:TPU:5',
'/job:tpu_worker/task:2/device:TPU:4',
'/job:tpu_worker/task:3/device:TPU:5',
]
device_list = [
session._DeviceAttributes(name, 'TPU', 1024, 0) for name in devices
]
device_details = resolver.TPUClusterResolver._get_device_dict_and_cores(
device_list)
self.assertEqual(device_details.total_cores, 8)
self.assertEqual(device_details.device_map,
{'0': ['0', '4'],
'1': ['1', '5'],
'2': ['0', '4'],
'3': ['1', '5']})
def testGetDeviceDictAndCoresWithCPUsAndGPUs(self):
devices = [
'/job:tpu_worker/task:0/device:CPU:0',
'/job:tpu_worker/task:1/device:CPU:0',
'/job:tpu_worker/task:2/device:CPU:0',
'/job:tpu_worker/task:3/device:CPU:0',
'/job:tpu_worker/task:0/device:GPU:1',
'/job:tpu_worker/task:1/device:GPU:1',
'/job:tpu_worker/task:2/device:GPU:1',
'/job:tpu_worker/task:3/device:GPU:1',
]
device_list = [
session._DeviceAttributes(name, 'XLA', 1024, 0) for name in devices
]
device_dict, num_cores =\
resolver.TPUClusterResolver._get_device_dict_and_cores(device_list)
self.assertEqual(num_cores, 0)
self.assertEqual(device_dict, {})
def testVerifySameCoreCount(self):
self.assertEqual(
resolver.TPUClusterResolver
._verify_and_return_same_core_count({0: [0, 1, 2, 3, 4, 5, 6, 7]}), 8)
self.assertEqual(
resolver.TPUClusterResolver
._verify_and_return_same_core_count({
0: [0, 1],
1: [2, 3]
}), 2)
with self.assertRaises(RuntimeError):
resolver.TPUClusterResolver._verify_and_return_same_core_count(
{
0: [0],
1: [1, 2]
})
@mock.patch.object(config, 'list_logical_devices')
@mock.patch.object(session.BaseSession, 'list_devices')
@mock.patch.object(resolver, 'is_running_in_gce', mock_is_not_running_in_gce)
def testNumAcceleratorsSuccess(self, mock_list_devices,
mock_eager_list_devices):
devices = [
context.LogicalDevice('/job:tpu_worker/task:0/device:TPU:0', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:1/device:TPU:1', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:2/device:TPU:0', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:3/device:TPU:1', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:0/device:TPU:4', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:1/device:TPU:5', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:2/device:TPU:4', 'TPU'),
context.LogicalDevice('/job:tpu_worker/task:3/device:TPU:5', 'TPU'),
]
device_list = [
session._DeviceAttributes(d.name, d.device_type, 1024, 0)
for d in devices
]
mock_eager_list_devices.return_value = devices
mock_list_devices.return_value = device_list
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'state': 'READY',
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(cluster_resolver.num_accelerators(), {'TPU': 2})
@mock.patch.object(config, 'list_logical_devices')
@mock.patch.object(session.BaseSession, 'list_devices')
@mock.patch.object(resolver, 'is_running_in_gce', mock_is_not_running_in_gce)
def testNumAcceleratorsRetryFailure(self, mock_list_devices,
mock_eager_list_devices):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
'health':
'HEALTHY',
'networkEndpoints': [
{
'ipAddress': '10.2.3.4',
'port': 8470,
},
{
'ipAddress': '10.2.3.5',
'port': 8470,
},
{
'ipAddress': '10.2.3.6',
'port': 8470,
},
{
'ipAddress': '10.2.3.7',
'port': 8470,
},
]
}
}
cluster_resolver = resolver.TPUClusterResolver(
project='test-project',
zone='us-central1-c',
tpu='test-tpu-1',
service=self.mock_service_client(tpu_map=tpu_map))
mock_list_devices.side_effect = errors.DeadlineExceededError(
None, None, 'timeout')
mock_eager_list_devices.side_effect = errors.DeadlineExceededError(
None, None, 'timeout')
with self.assertRaises(RuntimeError):
cluster_resolver.num_accelerators()
def testLocalTpuResolver(self):
cr = resolver.TPUClusterResolver(tpu='local')
self.assertEqual(cr.get_master(), '')
def testTpuTopology(self):
cluster_resolver = resolver.TPUClusterResolver(tpu='local')
self.assertIsNone(cluster_resolver._tpu_topology)
# Test set with tpu topology proto.
cluster_resolver.set_tpu_topology(
serialized_tpu_topology=topology_pb2.TopologyProto(
mesh_shape=[1, 1, 1, 1]).SerializeToString())
self.assertIsInstance(cluster_resolver.tpu_hardware_feature,
topology_pb2.TPUHardwareFeature)
def testEnvironment(self):
cluster_resolver = resolver.TPUClusterResolver(tpu='local')
self.assertEqual(cluster_resolver.environment, '')
if __name__ == '__main__':
test.main()
| TPUClusterResolverTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 797,
"end": 863
} | class ____(Mixin2):
def must_have(self) -> None:
pass
| A2 |
python | kamyu104__LeetCode-Solutions | Python/number-of-dice-rolls-with-target-sum.py | {
"start": 37,
"end": 602
} | class ____(object):
def numRollsToTarget(self, d, f, target):
"""
:type d: int
:type f: int
:type target: int
:rtype: int
"""
MOD = 10**9+7
dp = [[0 for _ in xrange(target+1)] for _ in xrange(2)]
dp[0][0] = 1
for i in xrange(1, d+1):
dp[i%2] = [0 for _ in xrange(target+1)]
for k in xrange(1, f+1):
for j in xrange(k, target+1):
dp[i%2][j] = (dp[i%2][j] + dp[(i-1)%2][j-k]) % MOD
return dp[d%2][target] % MOD
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_pattern08.py | {
"start": 315,
"end": 3540
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pattern08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [91631616, 91633152]
data = [
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
worksheet.write_column("E1", data[4])
worksheet.write_column("F1", data[5])
worksheet.write_column("G1", data[6])
worksheet.write_column("H1", data[7])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$3",
"pattern": {
"pattern": "percent_5",
"fg_color": "yellow",
"bg_color": "red",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$3",
"pattern": {
"pattern": "percent_50",
"fg_color": "#FF0000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$C$1:$C$3",
"pattern": {
"pattern": "light_downward_diagonal",
"fg_color": "#FFC000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$D$1:$D$3",
"pattern": {
"pattern": "light_vertical",
"fg_color": "#FFFF00",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$E$1:$E$3",
"pattern": {
"pattern": "dashed_downward_diagonal",
"fg_color": "#92D050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$F$1:$F$3",
"pattern": {
"pattern": "zigzag",
"fg_color": "#00B050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$G$1:$G$3",
"pattern": {
"pattern": "divot",
"fg_color": "#00B0F0",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$H$1:$H$3",
"pattern": {
"pattern": "small_grid",
"fg_color": "#0070C0",
},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.