language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 5120,
"end": 5203
} | class ____(HTTPSuccessful):
status_code = 204
empty_body = True
| HTTPNoContent |
python | google__python-fire | fire/decorators_test.py | {
"start": 695,
"end": 1113
} | class ____:
"""A class for testing decorated functions without default values."""
@decorators.SetParseFns(count=int)
def double(self, count):
return 2 * count
@decorators.SetParseFns(count=float)
def triple(self, count):
return 3 * count
@decorators.SetParseFns(int)
def quadruple(self, count):
return 4 * count
@decorators.SetParseFns(int)
def double(count):
return 2 * count
| NoDefaults |
python | django__django | tests/model_inheritance/models.py | {
"start": 4319,
"end": 4355
} | class ____(Child):
pass
| GrandChild |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/utils/test_batcher.py | {
"start": 1912,
"end": 7967
} | class ____:
@pytest.mark.asyncio
async def test_batch(self):
count = 0
batcher = TestBatcher(fake_generator())
async for x in batcher.stream():
count += 1
assert x["num_generated_tokens"] == 100
assert x["generated_text"] == TEXT_VALUE * 100
# Should only have been called once
assert count == 1
assert batcher.queue.empty()
@pytest.mark.asyncio
async def test_batch_timing(self):
count = 0
batcher = TestBatcher(fake_generator_slow(num_batches=10))
async for _x in batcher.stream():
count += 1
assert 9 <= count <= 12, (
"Count should have been called between 9 and 12 times, "
"because each iteration takes 1/10th of an interval to yield."
)
assert batcher.queue.empty()
@pytest.mark.asyncio
async def test_batch_last_return_is_immediate(self):
"""Test that we don't wait the entire interval for
the last response if it returns quickly."""
count = 0
token_count = 0
batcher = TestBatcher(fake_generator_slow_last_return_immediate())
last_response = None
async for _x in batcher.stream():
count += 1
token_count += _x["num_generated_tokens"]
last_response = _x
assert (
last_response["generated_text"] == TEXT_VALUE + FINAL_TEXT_VALUE
), "the last generated response should be batched with previous one"
assert token_count == 11, "token_count should be exactly 11"
assert (
count == 10
), "Count should have been called exactly 10 times (as many as we generated - 1)"
assert batcher.queue.empty()
@pytest.mark.asyncio
async def test_batch_no_interval(self):
"""Check that the class creates only one batch if there's no interval."""
batcher = TestBatcher(fake_generator_slow(num_batches=10), interval_ms=None)
count = 0
async for _x in batcher.stream():
count += 1
assert count == 1
assert batcher.queue.empty()
@pytest.mark.asyncio
@pytest.mark.parametrize("interval_ms", [100, None])
async def test_exception_propagation(self, interval_ms: Optional[float]):
"""Test that exceptions are propagated correctly to parent."""
async def generator_should_raise():
for _i in range(100):
await asyncio.sleep(0.01)
yield dict(num_generated_tokens=1, generated_text=TEXT_VALUE)
raise ValueError()
count = 0
batched = TestBatcher(generator_should_raise(), interval_ms=interval_ms)
async def parent():
nonlocal count
nonlocal batched
async for _x in batched.stream():
count += 1
task = asyncio.create_task(parent())
await asyncio.sleep(0.2)
with pytest.raises(ValueError):
task.result()
assert count == 1
@pytest.mark.asyncio
@pytest.mark.parametrize("interval_ms", [100, None])
@pytest.mark.parametrize("to_cancel", ["parent", "inner", "stream"])
async def test_cancellation(self, interval_ms: Optional[float], to_cancel: str):
"""There are 3 ways cancellation can happen:
1. The parent is cancelled
2. The generator is cancelled
3. The stream task is directly cancelled.
Make sure all associated tasks are cancelled in each instance.
"""
async def generator_should_raise():
with pytest.raises(asyncio.CancelledError):
for _i in range(100):
await asyncio.sleep(0.01)
yield dict(num_generated_tokens=1, generated_text=TEXT_VALUE)
if to_cancel == "inner":
raise asyncio.CancelledError()
batched = TestBatcher(generator_should_raise(), interval_ms=interval_ms)
async def parent():
nonlocal batched
async for _x in batched.stream():
pass
task = asyncio.create_task(parent())
await asyncio.sleep(0.2)
cancel_task = {
"parent": task,
"stream": batched.read_task,
}.get(to_cancel)
if cancel_task:
assert not task.done()
assert not batched.read_task.done()
cancel_task.cancel()
await asyncio.sleep(0.3)
assert batched.read_task.done(), "Read task should be completed"
assert task.done(), "All tasks should be done"
# Inner task is checked automatically with pytest.raises
@pytest.mark.asyncio
async def test_stable_streaming(self):
"""Test that the batcher does not add jitter to the stream when interval_ms is 0"""
async def generator():
for i in range(100):
await asyncio.sleep(0.01)
yield i
concurrency = 10
output_intervals = await asyncio.gather(
*[
count_interval_ms_from_stream(
Batcher(generator(), interval_ms=0).stream()
)
for _ in range(concurrency)
]
)
mean_batcher_interval = np.mean(output_intervals)
std_batcher_interval = np.std(output_intervals)
generator_intervals = await asyncio.gather(
*[count_interval_ms_from_stream(generator()) for _ in range(concurrency)]
)
mean_generator_interval = np.mean(generator_intervals)
std_generator_interval = np.std(generator_intervals)
assert np.isclose(
mean_batcher_interval, mean_generator_interval, rtol=0.1
), f"{mean_batcher_interval=}, {mean_generator_interval=}"
assert np.isclose(
std_batcher_interval, std_generator_interval, atol=0.1
), f"{std_batcher_interval=}, {std_generator_interval=}"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestBatching |
python | doocs__leetcode | solution/2100-2199/2187.Minimum Time to Complete Trips/Solution.py | {
"start": 0,
"end": 233
} | class ____:
def minimumTime(self, time: List[int], totalTrips: int) -> int:
mx = min(time) * totalTrips
return bisect_left(
range(mx), totalTrips, key=lambda x: sum(x // v for v in time)
)
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query.py | {
"start": 3753,
"end": 3853
} | class ____:
def method1():
return 0
def method2():
return 0
| ClassTest2_Alarm1 |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 59225,
"end": 59355
} | class ____:
enabled: bool
config: BoolInvertedIndexConfig
# Individual Value Type Classes
@dataclass
| BoolInvertedIndexType |
python | doocs__leetcode | solution/1000-1099/1024.Video Stitching/Solution.py | {
"start": 0,
"end": 444
} | class ____:
def videoStitching(self, clips: List[List[int]], time: int) -> int:
last = [0] * time
for a, b in clips:
if a < time:
last[a] = max(last[a], b)
ans = mx = pre = 0
for i, v in enumerate(last):
mx = max(mx, v)
if mx <= i:
return -1
if pre == i:
ans += 1
pre = mx
return ans
| Solution |
python | encode__django-rest-framework | tests/test_urlpatterns.py | {
"start": 479,
"end": 8051
} | class ____(TestCase):
"""
Tests `format_suffix_patterns` against different URLPatterns to ensure the
URLs still resolve properly, including any captured parameters.
"""
def _resolve_urlpatterns(self, urlpatterns, test_paths, allowed=None):
factory = APIRequestFactory()
try:
urlpatterns = format_suffix_patterns(urlpatterns, allowed=allowed)
except Exception:
self.fail("Failed to apply `format_suffix_patterns` on the supplied urlpatterns")
resolver = URLResolver(RegexPattern(r'^/'), urlpatterns)
for test_path in test_paths:
try:
test_path, expected_resolved = test_path
except (TypeError, ValueError):
expected_resolved = True
request = factory.get(test_path.path)
try:
callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
except Resolver404:
callback, callback_args, callback_kwargs = (None, None, None)
if expected_resolved:
raise
except Exception:
self.fail("Failed to resolve URL: %s" % request.path_info)
if not expected_resolved:
assert callback is None
continue
assert callback_args == test_path.args
assert callback_kwargs == test_path.kwargs
def _test_trailing_slash(self, urlpatterns):
test_paths = [
(URLTestPath('/test.api', (), {'format': 'api'}), True),
(URLTestPath('/test/.api', (), {'format': 'api'}), False),
(URLTestPath('/test.api/', (), {'format': 'api'}), True),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def test_trailing_slash(self):
urlpatterns = [
path('test/', dummy_view),
]
self._test_trailing_slash(urlpatterns)
def test_trailing_slash_django2(self):
urlpatterns = [
path('test/', dummy_view),
]
self._test_trailing_slash(urlpatterns)
def _test_format_suffix(self, urlpatterns):
test_paths = [
URLTestPath('/test', (), {}),
URLTestPath('/test.api', (), {'format': 'api'}),
URLTestPath('/test.asdf', (), {'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def test_format_suffix(self):
urlpatterns = [
path('test', dummy_view),
]
self._test_format_suffix(urlpatterns)
def test_format_suffix_django2(self):
urlpatterns = [
path('test', dummy_view),
]
self._test_format_suffix(urlpatterns)
def test_format_suffix_django2_args(self):
urlpatterns = [
path('convtest/<int:pk>', dummy_view),
re_path(r'^retest/(?P<pk>[0-9]+)$', dummy_view),
]
test_paths = [
URLTestPath('/convtest/42', (), {'pk': 42}),
URLTestPath('/convtest/42.api', (), {'pk': 42, 'format': 'api'}),
URLTestPath('/convtest/42.asdf', (), {'pk': 42, 'format': 'asdf'}),
URLTestPath('/retest/42', (), {'pk': '42'}),
URLTestPath('/retest/42.api', (), {'pk': '42', 'format': 'api'}),
URLTestPath('/retest/42.asdf', (), {'pk': '42', 'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def _test_default_args(self, urlpatterns):
test_paths = [
URLTestPath('/test', (), {'foo': 'bar', }),
URLTestPath('/test.api', (), {'foo': 'bar', 'format': 'api'}),
URLTestPath('/test.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def test_default_args(self):
urlpatterns = [
path('test', dummy_view, {'foo': 'bar'}),
]
self._test_default_args(urlpatterns)
def test_default_args_django2(self):
urlpatterns = [
path('test', dummy_view, {'foo': 'bar'}),
]
self._test_default_args(urlpatterns)
def _test_included_urls(self, urlpatterns):
test_paths = [
URLTestPath('/test/path', (), {'foo': 'bar', }),
URLTestPath('/test/path.api', (), {'foo': 'bar', 'format': 'api'}),
URLTestPath('/test/path.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def test_included_urls(self):
nested_patterns = [
path('path', dummy_view)
]
urlpatterns = [
path('test/', include(nested_patterns), {'foo': 'bar'}),
]
self._test_included_urls(urlpatterns)
def test_included_urls_mixed(self):
nested_patterns = [
path('path/<int:child>', dummy_view),
re_path(r'^re_path/(?P<child>[0-9]+)$', dummy_view)
]
urlpatterns = [
re_path(r'^pre_path/(?P<parent>[0-9]+)/', include(nested_patterns), {'foo': 'bar'}),
path('ppath/<int:parent>/', include(nested_patterns), {'foo': 'bar'}),
]
test_paths = [
# parent re_path() nesting child path()
URLTestPath('/pre_path/87/path/42', (), {'parent': '87', 'child': 42, 'foo': 'bar', }),
URLTestPath('/pre_path/87/path/42.api', (), {'parent': '87', 'child': 42, 'foo': 'bar', 'format': 'api'}),
URLTestPath('/pre_path/87/path/42.asdf', (), {'parent': '87', 'child': 42, 'foo': 'bar', 'format': 'asdf'}),
# parent path() nesting child re_path()
URLTestPath('/ppath/87/re_path/42', (), {'parent': 87, 'child': '42', 'foo': 'bar', }),
URLTestPath('/ppath/87/re_path/42.api', (), {'parent': 87, 'child': '42', 'foo': 'bar', 'format': 'api'}),
URLTestPath('/ppath/87/re_path/42.asdf', (), {'parent': 87, 'child': '42', 'foo': 'bar', 'format': 'asdf'}),
# parent path() nesting child path()
URLTestPath('/ppath/87/path/42', (), {'parent': 87, 'child': 42, 'foo': 'bar', }),
URLTestPath('/ppath/87/path/42.api', (), {'parent': 87, 'child': 42, 'foo': 'bar', 'format': 'api'}),
URLTestPath('/ppath/87/path/42.asdf', (), {'parent': 87, 'child': 42, 'foo': 'bar', 'format': 'asdf'}),
# parent re_path() nesting child re_path()
URLTestPath('/pre_path/87/re_path/42', (), {'parent': '87', 'child': '42', 'foo': 'bar', }),
URLTestPath('/pre_path/87/re_path/42.api', (), {'parent': '87', 'child': '42', 'foo': 'bar', 'format': 'api'}),
URLTestPath('/pre_path/87/re_path/42.asdf', (), {'parent': '87', 'child': '42', 'foo': 'bar', 'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def _test_allowed_formats(self, urlpatterns):
allowed_formats = ['good', 'ugly']
test_paths = [
(URLTestPath('/test.good/', (), {'format': 'good'}), True),
(URLTestPath('/test.bad', (), {}), False),
(URLTestPath('/test.ugly', (), {'format': 'ugly'}), True),
]
self._resolve_urlpatterns(urlpatterns, test_paths, allowed=allowed_formats)
def test_allowed_formats_re_path(self):
urlpatterns = [
re_path(r'^test$', dummy_view),
]
self._test_allowed_formats(urlpatterns)
def test_allowed_formats_path(self):
urlpatterns = [
path('test', dummy_view),
]
self._test_allowed_formats(urlpatterns)
| FormatSuffixTests |
python | astropy__astropy | astropy/table/tests/test_init_table.py | {
"start": 5625,
"end": 6783
} | class ____(BaseInitFromListLike):
def setup_method(self, table_type):
self._setup(table_type)
self.data = [
(np.int32(1), np.int32(3)),
Column(name="col1", data=[2, 4], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
]
def test_default_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ["col0", "col1", "col2"]
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["b", None, "c"], dtype=["f4", None, "f8"])
assert t.colnames == ["b", "col1", "c"]
assert t["b"].dtype.type == np.float32
assert t["col1"].dtype.type == np.int32
assert t["c"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_bad_data(self, table_type):
self._setup(table_type)
with pytest.raises(ValueError):
table_type([[1, 2], [3, 4, 5]])
@pytest.mark.usefixtures("table_type")
| TestInitFromListOfLists |
python | ray-project__ray | python/ray/tests/test_resource_demand_scheduler.py | {
"start": 53232,
"end": 59423
} | class ____(unittest.TestCase):
def testResourceDemandVector(self):
lm = LoadMetrics()
lm.update(
"1.1.1.1",
mock_node_id(),
{"CPU": 2},
{"CPU": 1},
0,
waiting_bundles=[{"GPU": 1}],
infeasible_bundles=[{"CPU": 16}],
)
assert same_elements(lm.get_resource_demand_vector(), [{"CPU": 16}, {"GPU": 1}])
def testPlacementGroupLoad(self):
lm = LoadMetrics()
pending_placement_groups = [
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.PACK,
bundles=([Bundle(unit_resources={"GPU": 2})] * 2),
),
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.SPREAD,
bundles=([Bundle(unit_resources={"GPU": 2})] * 2),
),
]
lm.update(
"1.1.1.1",
mock_node_id(),
{},
{},
DUMMY_IDLE_DURATION_S,
pending_placement_groups=pending_placement_groups,
)
assert lm.get_pending_placement_groups() == pending_placement_groups
def testSummary(self):
lm = LoadMetrics()
assert lm.summary() is not None
pending_placement_groups = [
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.PACK,
bundles=([Bundle(unit_resources={"GPU": 2})] * 2),
),
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.PACK,
bundles=([Bundle(unit_resources={"GPU": 2})] * 2),
),
]
lm.update(
"1.1.1.1",
mock_node_id(),
{
"CPU": 64,
"memory": 1000 * 1024 * 1024,
"object_store_memory": 2000 * 1024 * 1024,
},
{
"CPU": 2,
"memory": 500 * 1024 * 1024, # 500 MiB
"object_store_memory": 1000 * 1024 * 1024,
},
0,
)
lm.update(
"1.1.1.2",
mock_node_id(),
{
"CPU": 64,
"GPU": 8,
"accelerator_type:V100": 1,
},
{
"CPU": 0,
"GPU": 1,
"accelerator_type:V100": 1,
},
0,
)
lm.update(
"1.1.1.3",
mock_node_id(),
{"CPU": 64, "GPU": 8, "accelerator_type:V100": 1},
{"CPU": 0, "GPU": 0, "accelerator_type:V100": 0.92},
0,
)
lm.update(
"1.1.1.4",
mock_node_id(),
{"CPU": 2},
{"CPU": 2},
DUMMY_IDLE_DURATION_S,
waiting_bundles=[{"GPU": 2}] * 10,
infeasible_bundles=[{"CPU": 16}, {"GPU": 2}, {"CPU": 16, "GPU": 2}],
pending_placement_groups=pending_placement_groups,
)
lm.set_resource_requests([{"CPU": 64}, {"GPU": 8}, {"GPU": 8}])
summary = lm.summary()
assert summary.usage["CPU"] == (190, 194)
assert summary.usage["GPU"] == (15, 16)
assert summary.usage["memory"] == (500 * 2**20, 1000 * 2**20)
assert summary.usage["object_store_memory"] == (1000 * 2**20, 2000 * 2**20)
assert (
summary.usage["accelerator_type:V100"][1] == 2
), "Not comparing the usage value due to floating point error."
assert ({"GPU": 2}, 11) in summary.resource_demand
assert ({"CPU": 16}, 1) in summary.resource_demand
assert ({"CPU": 16, "GPU": 2}, 1) in summary.resource_demand
assert len(summary.resource_demand) == 3
assert (
{"bundles": [({"GPU": 2}, 2)], "strategy": "PACK"},
2,
) in summary.pg_demand
assert len(summary.pg_demand) == 1
assert ({"GPU": 8}, 2) in summary.request_demand
assert ({"CPU": 64}, 1) in summary.request_demand
assert len(summary.request_demand) == 2
# TODO (Alex): This set of nodes won't be very useful in practice
# because the node:xxx.xxx.xxx.xxx resources means that no 2 nodes
# should ever have the same set of resources.
assert len(summary.node_types) == 3, summary.node_types
# Ensure correct dict-conversion
summary_dict = asdict(summary)
assert summary_dict["usage"]["CPU"] == (190, 194)
assert summary_dict["usage"]["GPU"] == (15, 16)
assert summary_dict["usage"]["memory"] == (500 * 2**20, 1000 * 2**20)
assert summary_dict["usage"]["object_store_memory"] == (
1000 * 2**20,
2000 * 2**20,
)
assert (
summary_dict["usage"]["accelerator_type:V100"][1] == 2
), "Not comparing the usage value due to floating point error."
assert ({"GPU": 2}, 11) in summary_dict["resource_demand"]
assert ({"CPU": 16}, 1) in summary_dict["resource_demand"]
assert ({"CPU": 16, "GPU": 2}, 1) in summary_dict["resource_demand"]
assert len(summary_dict["resource_demand"]) == 3
assert ({"bundles": [({"GPU": 2}, 2)], "strategy": "PACK"}, 2) in summary_dict[
"pg_demand"
]
assert len(summary_dict["pg_demand"]) == 1
assert ({"GPU": 8}, 2) in summary_dict["request_demand"]
assert ({"CPU": 64}, 1) in summary_dict["request_demand"]
assert len(summary_dict["request_demand"]) == 2
assert len(summary_dict["node_types"]) == 3, summary_dict["node_types"]
# Ensure summary_dict is json-serializable
json.dumps(summary_dict)
# Backwards compatibility check: head_ip is correctly processed
# when included as an argument to LoadMetricsSummary.
summary_dict["head_ip"] = "1.1.1.1"
# No compatibility issue.
LoadMetricsSummary(**summary_dict)
| LoadMetricsTest |
python | walkccc__LeetCode | solutions/3371. Identify the Largest Outlier in an Array/3371.py | {
"start": 0,
"end": 409
} | class ____:
def getLargestOutlier(self, nums: list[int]) -> int:
ans = -math.inf
summ = sum(nums)
count = collections.Counter(nums)
for num in nums:
withoutNum = summ - num
if withoutNum % 2 == 0:
specialSum = withoutNum // 2 # the sum of special numbers
if count[specialSum] > (1 if num == specialSum else 0):
ans = max(ans, num)
return ans
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/typing.py | {
"start": 20398,
"end": 21032
} | class ____:
def __getattr__(self, key: str) -> tuple[type, ...]:
types = tuple(
{
t
for t in [
getattr(typing, key, None),
getattr(typing_extensions, key, None),
]
if t is not None
}
)
if not types:
raise AttributeError(key)
self.__dict__[key] = types
return types
_type_tuples = _TypingInstances()
if TYPE_CHECKING:
_type_instances = typing_extensions
else:
_type_instances = _type_tuples
LITERAL_TYPES = _type_tuples.Literal
| _TypingInstances |
python | getsentry__sentry | tests/sentry/integrations/bitbucket/test_repository.py | {
"start": 5865,
"end": 8271
} | class ____(IntegrationRepositoryTestCase):
provider_name = "integrations:bitbucket"
def setUp(self) -> None:
super().setUp()
self.base_url = "https://api.bitbucket.org"
self.shared_secret = "234567890"
self.subject = "connect:1234567"
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = self.create_provider_integration(
provider="bitbucket",
external_id=self.subject,
name="MyBitBucket",
metadata={
"base_url": self.base_url,
"shared_secret": self.shared_secret,
"subject": self.subject,
},
)
self.integration.get_provider().setup()
self.integration.add_organization(self.organization, self.user)
self.repo = Repository.objects.create(
provider="bitbucket",
name="sentryuser/newsdiffs",
organization_id=self.organization.id,
config={"name": "sentryuser/newsdiffs"},
integration_id=self.integration.id,
)
self.default_repository_config = {"full_name": "getsentry/example-repo", "id": "123"}
def add_create_repository_responses(self, repository_config):
responses.add(
responses.GET,
f"{self.base_url}/2.0/repositories/{self.repo.name}",
json=repository_config,
)
responses.add(
responses.POST,
f"{self.base_url}/2.0/repositories/{self.repo.name}/hooks",
json={"uuid": "99"},
)
def test_create_repository_data_no_installation_id(self) -> None:
response = self.create_repository(self.default_repository_config, None)
assert response.status_code == 400
self.assert_error_message(response, "validation", "requires an integration id")
def test_create_repository_data_integration_does_not_exist(self) -> None:
integration_id = self.integration.id
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
response = self.create_repository(self.default_repository_config, integration_id)
assert response.status_code == 404
self.assert_error_message(
response, "not found", "Integration matching query does not exist."
)
| BitbucketCreateRepositoryTestCase |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/api/test_operations.py | {
"start": 5839,
"end": 13870
} | class ____:
asset_id: int = 1
dag_id: str = "dag_id"
before: str = "2024-12-31T23:59:59+00:00"
asset_response = AssetResponse(
id=asset_id,
name="asset",
uri="asset_uri",
extra={"extra": "extra"}, # type: ignore[dict-item]
created_at=datetime.datetime(2024, 12, 31, 23, 59, 59),
updated_at=datetime.datetime(2025, 1, 1, 0, 0, 0),
scheduled_dags=[],
producing_tasks=[],
consuming_tasks=[],
aliases=[],
watchers=[],
group="group",
)
asset_alias_response = AssetAliasResponse(
id=asset_id,
name="asset",
group="group",
)
asset_queued_event_response = QueuedEventResponse(
dag_id=dag_id,
asset_id=asset_id,
created_at=datetime.datetime(2024, 12, 31, 23, 59, 59),
dag_display_name=dag_id,
)
asset_queued_event_collection_response = QueuedEventCollectionResponse(
queued_events=[asset_queued_event_response],
total_entries=1,
)
dag_run_response = DAGRunResponse(
dag_display_name=dag_id,
dag_run_id=dag_id,
dag_id=dag_id,
logical_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
queued_at=datetime.datetime(2025, 1, 1, 0, 0, 0),
start_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
end_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
data_interval_start=datetime.datetime(2025, 1, 1, 0, 0, 0),
data_interval_end=datetime.datetime(2025, 1, 1, 0, 0, 0),
last_scheduling_decision=datetime.datetime(2025, 1, 1, 0, 0, 0),
run_type=DagRunType.MANUAL,
run_after=datetime.datetime(2025, 1, 1, 0, 0, 0),
state=DagRunState.RUNNING,
triggered_by=DagRunTriggeredByType.UI,
conf=None,
note=None,
dag_versions=[
DagVersionResponse(
id=uuid.uuid4(),
version_number=1,
dag_id=dag_id,
bundle_name="bundle_name",
bundle_version="1",
created_at=datetime.datetime(2025, 1, 1, 0, 0, 0),
dag_display_name=dag_id,
)
],
)
asset_create_event_body = CreateAssetEventsBody(asset_id=asset_id, extra=None)
assets_dag_reference = DagRunAssetReference(
run_id="manual__2025-01-01T00:00:00+00:00",
dag_id=dag_id,
logical_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
start_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
end_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
state="RUNNING",
data_interval_start=datetime.datetime(2025, 1, 1, 0, 0, 0),
data_interval_end=datetime.datetime(2025, 1, 1, 0, 0, 0),
)
asset_event_response = AssetEventResponse(
id=asset_id,
asset_id=asset_id,
uri="uri",
name="asset",
group="group",
extra=None,
source_task_id="task_id",
source_dag_id=dag_id,
source_run_id="manual__2025-01-01T00:00:00+00:00",
source_map_index=1,
created_dagruns=[assets_dag_reference],
timestamp=datetime.datetime(2025, 1, 1, 0, 0, 0),
)
def test_get_asset(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/assets/{self.asset_id}"
return httpx.Response(200, json=json.loads(self.asset_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.get(self.asset_id)
assert response == self.asset_response
def test_get_by_alias(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/assets/aliases/{self.asset_id}"
return httpx.Response(200, json=json.loads(self.asset_alias_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.get_by_alias(self.asset_id)
assert response == self.asset_alias_response
def test_list(self):
assets_collection_response = AssetCollectionResponse(
assets=[self.asset_response],
total_entries=1,
)
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == "/api/v2/assets"
return httpx.Response(200, json=json.loads(assets_collection_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.list()
assert response == assets_collection_response
def test_list_by_alias(self):
assets_collection_response = AssetAliasCollectionResponse(
asset_aliases=[self.asset_alias_response],
total_entries=1,
)
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == "/api/v2/assets/aliases"
return httpx.Response(200, json=json.loads(assets_collection_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.list_by_alias()
assert response == assets_collection_response
def test_create_event(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == "/api/v2/assets/events"
return httpx.Response(200, json=json.loads(self.asset_event_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.create_event(asset_event_body=self.asset_create_event_body)
assert response == self.asset_event_response
def test_materialize(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/assets/{self.asset_id}/materialize"
return httpx.Response(200, json=json.loads(self.dag_run_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.materialize(asset_id=self.asset_id)
assert response == self.dag_run_response
def test_get_queued_events(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/assets/{self.asset_id}/queuedEvents"
return httpx.Response(
200, json=json.loads(self.asset_queued_event_collection_response.model_dump_json())
)
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.get_queued_events(asset_id=self.asset_id)
assert response == self.asset_queued_event_collection_response
def test_get_dag_queued_events(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/dags/{self.dag_id}/assets/queuedEvents"
return httpx.Response(
200, json=json.loads(self.asset_queued_event_collection_response.model_dump_json())
)
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.get_dag_queued_events(dag_id=self.dag_id, before=self.before)
assert response == self.asset_queued_event_collection_response
def test_get_dag_queued_event(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == f"/api/v2/dags/{self.dag_id}/assets/{self.asset_id}/queuedEvents"
return httpx.Response(200, json=json.loads(self.asset_queued_event_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.assets.get_dag_queued_event(dag_id=self.dag_id, asset_id=self.asset_id)
assert response == self.asset_queued_event_response
| TestAssetsOperations |
python | neetcode-gh__leetcode | python/0051-n-queens.py | {
"start": 0,
"end": 868
} | class ____:
def solveNQueens(self, n: int) -> List[List[str]]:
col = set()
posDiag = set() # (r + c)
negDiag = set() # (r - c)
res = []
board = [["."] * n for i in range(n)]
def backtrack(r):
if r == n:
copy = ["".join(row) for row in board]
res.append(copy)
return
for c in range(n):
if c in col or (r + c) in posDiag or (r - c) in negDiag:
continue
col.add(c)
posDiag.add(r + c)
negDiag.add(r - c)
board[r][c] = "Q"
backtrack(r + 1)
col.remove(c)
posDiag.remove(r + c)
negDiag.remove(r - c)
board[r][c] = "."
backtrack(0)
return res
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 350330,
"end": 351080
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateIpAllowListEnabledSetting"""
__schema__ = github_schema
__field_names__ = ("owner_id", "setting_value", "client_mutation_id")
owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
"""The ID of the owner on which to set the IP allow list enabled
setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(IpAllowListEnabledSettingValue), graphql_name="settingValue")
"""The value for the IP allow list enabled setting."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateIpAllowListEnabledSettingInput |
python | getsentry__sentry | tests/sentry/sentry_apps/services/test_hook_service.py | {
"start": 566,
"end": 13712
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(name="foo", organization=self.org)
self.sentry_app = self.create_sentry_app(
organization_id=self.org.id, events=["issue.created"]
)
def call_create_hook(
self, project_ids: list[int] | None = None, events: list[str] | None = None
) -> RpcServiceHook:
events = events or ["event.created"]
return hook_service.create_service_hook(
application_id=self.sentry_app.application.id,
actor_id=self.sentry_app.proxy_user.id,
organization_id=self.org.id,
project_ids=project_ids,
events=events,
url=self.sentry_app.webhook_url,
)
def test_creates_service_hook(self) -> None:
self.call_create_hook()
with assume_test_silo_mode(SiloMode.REGION):
service_hook = ServiceHook.objects.get(
application_id=self.sentry_app.application_id,
actor_id=self.sentry_app.proxy_user.id,
url=self.sentry_app.webhook_url,
)
assert service_hook
assert service_hook.events == ["event.created"]
def test_expands_resource_events_to_specific_events(self) -> None:
service_hook = self.call_create_hook(events=["issue"])
assert service_hook.events == EVENT_EXPANSION[SentryAppResourceType.ISSUE]
def test_expand_events(self) -> None:
assert expand_events(["issue"]) == EVENT_EXPANSION[SentryAppResourceType.ISSUE]
def test_expand_events_multiple(self) -> None:
ret = expand_events(["unrelated", "issue", "comment", "unrelated"])
assert ret == [
"comment.created",
"comment.deleted",
"comment.updated",
"issue.assigned",
"issue.created",
"issue.ignored",
"issue.resolved",
"issue.unresolved",
"unrelated",
]
def test_consolidate_events(self) -> None:
assert consolidate_events(["issue.created"]) == {"issue"}
def test_update_webhook_and_events_with_webhook_url(self) -> None:
installation1 = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.org, user=self.user
)
installation2 = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.org, user=self.user
)
with assume_test_silo_mode(SiloMode.REGION):
hook1 = ServiceHook.objects.get(
installation_id=installation1.id, application_id=self.sentry_app.application.id
)
hook2 = ServiceHook.objects.get(
installation_id=installation2.id, application_id=self.sentry_app.application.id
)
ServiceHook.objects.filter(application_id=self.sentry_app.application.id).update(
events=["comment.created"]
)
# Call the update method
result = hook_service.update_webhook_and_events(
organization_id=self.org.id,
application_id=self.sentry_app.application.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
)
# Verify the result
assert len(result) == 2
# Verify hooks were updated in database
with assume_test_silo_mode(SiloMode.REGION):
updated_hook1 = ServiceHook.objects.get(id=hook1.id)
updated_hook2 = ServiceHook.objects.get(id=hook2.id)
assert updated_hook1.url == self.sentry_app.webhook_url
assert updated_hook2.url == self.sentry_app.webhook_url
# Events should be expanded
expected_events = self.sentry_app.events
assert updated_hook1.events == expected_events
assert updated_hook2.events == expected_events
assert updated_hook2.events == ["issue.created"]
def test_update_webhook_and_events_with_many_installations(self) -> None:
# Create 1000 webhooks
with assume_test_silo_mode(SiloMode.REGION):
hooks_to_create = []
for _ in range(10000):
# these hooks arent accurate to actual installation hooks
# but it's enough to test the performance of the update
hooks_to_create.append(
ServiceHook(
application_id=self.sentry_app.application.id,
actor_id=self.user.id,
installation_id=self.user.id,
url=self.sentry_app.webhook_url,
events=["comment.created", "error.created"],
)
)
ServiceHook.objects.bulk_create(hooks_to_create)
assert (
ServiceHook.objects.filter(
application_id=self.sentry_app.application.id,
events=["comment.created", "error.created"],
).count()
== 10000
)
result = hook_service.update_webhook_and_events(
organization_id=self.org.id,
application_id=self.sentry_app.application.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
)
with assume_test_silo_mode(SiloMode.REGION):
assert len(result) == 10000
assert (
ServiceHook.objects.filter(
application_id=self.sentry_app.application.id,
events=self.sentry_app.events,
).count()
== 10000
)
assert (
ServiceHook.objects.filter(
application_id=self.sentry_app.application.id,
events=["comment.created", "error.created"],
).count()
== 0
)
def test_update_webhook_and_events_without_webhook_url(self) -> None:
# Create service hooks
hook1 = self.create_service_hook(
application=self.sentry_app.application,
organization=self.org,
url="https://example.com",
events=["issue.created"],
)
hook2 = self.create_service_hook(
application=self.sentry_app.application,
organization=self.org,
url="https://example2.com",
events=["comment.created"],
)
# Call update with webhook_url=None (should delete hooks)
result = hook_service.update_webhook_and_events(
organization_id=self.org.id,
application_id=self.sentry_app.application.id,
webhook_url=None,
events=["issue"],
)
# Should return empty list
assert result == []
# Verify hooks were deleted
with assume_test_silo_mode(SiloMode.REGION):
assert not ServiceHook.objects.filter(id=hook1.id).exists()
assert not ServiceHook.objects.filter(id=hook2.id).exists()
def test_update_webhook_and_events_no_matching_hooks(self) -> None:
# Create a hook for a different application
other_app = self.create_sentry_app(name="other-app", organization_id=self.org.id)
self.create_service_hook(
application=other_app.application,
organization=self.org,
url="https://example.com",
events=["issue.created"],
)
# Try to update hooks for our app (should find no hooks)
result = hook_service.update_webhook_and_events(
organization_id=self.org.id,
application_id=self.sentry_app.application.id,
webhook_url="https://new-url.com",
events=["issue"],
)
# Should return empty list since no hooks match the application_id
assert result == []
def test_create_or_update_webhook_and_events_for_installation_create(self) -> None:
# Create an installation
installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.org, user=self.user
)
with assume_test_silo_mode(SiloMode.REGION):
hook = ServiceHook.objects.get(
installation_id=installation.id, application_id=self.sentry_app.application.id
)
hook.delete()
# Call create_or_update (should create since no hook exists)
result = hook_service.create_or_update_webhook_and_events_for_installation(
installation_id=installation.id,
organization_id=self.org.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
application_id=self.sentry_app.application.id,
)
# Should return one hook
assert len(result) == 1
# Verify hook was created in database
with assume_test_silo_mode(SiloMode.REGION):
hook = ServiceHook.objects.get(
installation_id=installation.id, application_id=self.sentry_app.application.id
)
assert hook.url == self.sentry_app.webhook_url
assert hook.events == self.sentry_app.events
def test_create_or_update_webhook_and_events_for_installation_update(self) -> None:
# Create an installation and update events to be mismatched
installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.org, user=self.user
)
with assume_test_silo_mode(SiloMode.REGION):
hook = ServiceHook.objects.get(
installation_id=installation.id, application_id=self.sentry_app.application.id
)
hook.events = ["error.created"]
hook.save()
# Call create_or_update (should update existing hook to match sentry app)
result = hook_service.create_or_update_webhook_and_events_for_installation(
installation_id=installation.id,
organization_id=self.org.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
application_id=self.sentry_app.application.id,
)
# Should return one hook
assert len(result) == 1
# Verify hook was updated (same ID, new values)
with assume_test_silo_mode(SiloMode.REGION):
updated_hook = ServiceHook.objects.get(id=hook.id)
assert updated_hook.url == self.sentry_app.webhook_url
assert updated_hook.events == self.sentry_app.events
# Should still be only one hook for this installation
hooks_count = ServiceHook.objects.filter(
installation_id=installation.id, application_id=self.sentry_app.application.id
).count()
assert hooks_count == 1
def test_create_or_update_webhook_and_events_for_installation_delete(self) -> None:
installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.org, user=self.user
)
with assume_test_silo_mode(SiloMode.REGION):
existing_hook = ServiceHook.objects.get(
installation_id=installation.id, application_id=self.sentry_app.application.id
)
# Call create_or_update with webhook_url=None (should delete)
result = hook_service.create_or_update_webhook_and_events_for_installation(
installation_id=installation.id,
organization_id=self.org.id,
webhook_url=None,
events=["issue"],
application_id=self.sentry_app.application.id,
)
# Should return empty list
assert result == []
# Verify hook was deleted
with assume_test_silo_mode(SiloMode.REGION):
assert not ServiceHook.objects.filter(id=existing_hook.id).exists()
def test_create_or_update_webhook_and_events_for_installation_delete_nonexistent(self) -> None:
# Create an installation but no hook
installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.org, user=self.user
)
with assume_test_silo_mode(SiloMode.REGION):
existing_hook = ServiceHook.objects.get(
installation_id=installation.id, application_id=self.sentry_app.application.id
)
existing_hook.delete()
# Call create_or_update with webhook_url=None (should handle gracefully)
result = hook_service.create_or_update_webhook_and_events_for_installation(
installation_id=installation.id,
organization_id=self.org.id,
webhook_url=None,
events=["issue"],
application_id=self.sentry_app.application.id,
)
# Should return empty list and not raise exception
assert result == []
@all_silo_test(regions=create_test_regions("us", "de"))
| TestHookService |
python | openai__openai-python | src/openai/types/beta/threads/run_create_params.py | {
"start": 8325,
"end": 9472
} | class ____(TypedDict, total=False):
content: Required[Union[str, Iterable[MessageContentPartParam]]]
"""The text contents of the message."""
role: Required[Literal["user", "assistant"]]
"""The role of the entity that is creating the message. Allowed values include:
- `user`: Indicates the message is sent by an actual user and should be used in
most cases to represent user-generated messages.
- `assistant`: Indicates the message is generated by the assistant. Use this
value to insert messages from the assistant into the conversation.
"""
attachments: Optional[Iterable[AdditionalMessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
| AdditionalMessage |
python | encode__httpx | httpx/_exceptions.py | {
"start": 3342,
"end": 3444
} | class ____(TimeoutException):
"""
Timed out while sending data to the host.
"""
| WriteTimeout |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py | {
"start": 1506,
"end": 1598
} | class ____(notabc.ABC, abc.ABCMeta): # safe
def method(self):
foo()
| multi_super_1 |
python | getsentry__sentry | src/sentry/replays/lib/new_query/fields.py | {
"start": 1365,
"end": 1714
} | class ____(Protocol):
"""Field interface.
Any instance which defines an "apply" method which accepts a "SearchFilter" and returns a
"Condition" is considered a field. Additional methods and state may be added to help
construct the "Condition".
"""
def apply(self, search_filter: SearchFilter) -> Condition: ...
| FieldProtocol |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 94818,
"end": 94924
} | class ____(_TestLinearFilter):
dtype = 'float64'
@skip_xp_backends(np_only=True)
| TestLinearFilterFloat64 |
python | doocs__leetcode | solution/2300-2399/2311.Longest Binary Subsequence Less Than or Equal to K/Solution.py | {
"start": 0,
"end": 294
} | class ____:
def longestSubsequence(self, s: str, k: int) -> int:
ans = v = 0
for c in s[::-1]:
if c == "0":
ans += 1
elif ans < 30 and (v | 1 << ans) <= k:
v |= 1 << ans
ans += 1
return ans
| Solution |
python | marshmallow-code__marshmallow | tests/test_serialization.py | {
"start": 594,
"end": 694
} | class ____:
def __init__(self, dtime_int):
self.dtime_int = dtime_int
| DateTimeIntegerTuple |
python | numba__numba | numba/tests/test_function_type.py | {
"start": 36956,
"end": 39010
} | class ____(MemoryLeakMixin, TestCase):
def test_exception_raising(self):
class MyError(Exception):
pass
@njit
def add(x, y):
res = x + y
if res > 100:
raise MyError(res)
return res
fnty = types.FunctionType(int64(int64, int64))
@njit(int64(fnty))
def callme(fn):
c = 0
for i in range(100):
c = fn(c, i)
return c
@njit
def bar():
return callme(add)
# Pass Dispatcher as a global reference
with self.assertRaises(MyError) as exc:
bar()
self.assertEqual(exc.exception.args, (105,))
# Pass Dispatcher by argument
with self.assertRaises(MyError) as exc:
callme(add)
self.assertEqual(exc.exception.args, (105,))
def test_exception_ignored_in_cfunc(self):
class MyError(Exception):
pass
@njit
def add(x, y):
res = x + y
if res > 100:
raise MyError(res)
return res
fnty = types.FunctionType(int64(int64, int64))
@njit(int64(fnty, int64, int64))
def callme(fn, x, y):
return fn(x, y)
# Cfunc as argument will ignore raised exception
@cfunc(int64(int64, int64))
def c_add(x, y):
return add(x, y)
self.assertEqual(callme(c_add, 12, 32), 44)
# If unittest is buffering (-b), the message goes to Python level stderr
# otherwise, it goes to C stderr.
with redirect_c_stderr() as c_stderr, captured_stderr() as stderr:
# raise ignored and result is garbage
callme(c_add, 100, 1)
sys.stderr.flush()
err = c_stderr.read()
if not err:
err = stderr.getvalue()
self.assertIn("Exception ignored in:", err)
self.assertIn(str(MyError(101)), err)
if __name__ == '__main__':
unittest.main()
| TestExceptionInFunctionType |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 105923,
"end": 106671
} | class ____(TorchHigherOrderOperatorVariable):
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
from .builder import wrap_fx_proxy
args, kwargs = LazyVariableTracker.realize_all((args, kwargs))
args_proxy = [arg.as_proxy() for arg in args]
kwargs_proxy = {k: v.as_proxy() for k, v in kwargs.items()}
return wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_function",
self.value,
args=tuple(args_proxy),
kwargs=kwargs_proxy,
),
)
| PrintHigherOrderVariable |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/common.py | {
"start": 23514,
"end": 23620
} | class ____(Enum):
ALL = "live"
REGRESSION = "regression"
VALIDATION = "validation"
| LiveTestSuite |
python | PrefectHQ__prefect | src/integrations/prefect-aws/tests/test_client_parameters.py | {
"start": 173,
"end": 5643
} | class ____:
@pytest.mark.parametrize(
"params,result",
[
(AwsClientParameters(), {"use_ssl": True}),
(
AwsClientParameters(
use_ssl=False, verify=False, endpoint_url="http://localhost:9000"
),
{
"use_ssl": False,
"verify": False,
"endpoint_url": "http://localhost:9000",
},
),
(
AwsClientParameters(endpoint_url="https://localhost:9000"),
{"use_ssl": True, "endpoint_url": "https://localhost:9000"},
),
(
AwsClientParameters(api_version="1.0.0"),
{"use_ssl": True, "api_version": "1.0.0"},
),
],
)
def test_get_params_override_expected_output(
self, params: AwsClientParameters, result: Dict[str, Any], tmp_path
):
assert result == params.get_params_override()
@pytest.mark.parametrize(
"params,result",
[
(
AwsClientParameters(
config=dict(
region_name="eu_west_1",
retries={"max_attempts": 10, "mode": "standard"},
signature_version="unsigned",
)
),
{
"config": {
"region_name": "eu_west_1",
"retries": {"max_attempts": 10, "mode": "standard"},
"signature_version": UNSIGNED,
},
},
),
],
)
def test_with_custom_config(
self, params: AwsClientParameters, result: Dict[str, Any]
):
assert (
result["config"]["region_name"]
== params.get_params_override()["config"].region_name
)
assert (
result["config"]["retries"]
== params.get_params_override()["config"].retries
)
def test_with_not_verify_and_verify_cert_path(self, tmp_path):
cert_path = tmp_path / "ca-bundle.crt"
cert_path.touch()
with pytest.warns(
UserWarning, match="verify_cert_path is set but verify is False"
):
params = AwsClientParameters(verify=False, verify_cert_path=cert_path)
assert params.verify_cert_path is None
assert not params.verify
def test_get_params_override_with_config_with_deprecated_verify(self, tmp_path):
cert_path = tmp_path / "ca-bundle.crt"
cert_path.touch()
with pytest.warns(DeprecationWarning, match="verify should be a boolean"):
params = AwsClientParameters(verify=cert_path)
assert params.verify
assert not params.verify_cert_path
override_params = params.get_params_override()
override_params["verify"] == cert_path
def test_get_params_override_with_config(self, tmp_path):
cert_path = tmp_path / "ca-bundle.crt"
cert_path.touch()
params = AwsClientParameters(
config=Config(
region_name="eu_west_1",
retries={"max_attempts": 10, "mode": "standard"},
),
verify_cert_path=cert_path,
)
override_params = params.get_params_override()
override_params["config"].region_name == "eu_west_1"
override_params["config"].retries == {
"max_attempts": 10,
"mode": "standard",
}
def test_get_params_override_with_verify_cert_path(self, tmp_path):
cert_path = tmp_path / "ca-bundle.crt"
cert_path.touch()
params = AwsClientParameters(verify_cert_path=cert_path)
override_params = params.get_params_override()
assert override_params["verify"] == cert_path
def test_get_params_override_with_both_cert_path(self, tmp_path):
old_cert_path = tmp_path / "old-ca-bundle.crt"
old_cert_path.touch()
cert_path = tmp_path / "ca-bundle.crt"
cert_path.touch()
with pytest.warns(
UserWarning, match="verify_cert_path is set but verify is also set"
):
params = AwsClientParameters(
verify=old_cert_path, verify_cert_path=cert_path
)
override_params = params.get_params_override()
assert override_params["verify"] == cert_path
def test_get_params_override_with_default_verify(self):
params = AwsClientParameters()
override_params = params.get_params_override()
assert "verify" not in override_params, (
"verify should not be in params_override when not explicitly set"
)
def test_get_params_override_with_explicit_verify(self):
params_true = AwsClientParameters(verify=True)
params_false = AwsClientParameters(verify=False)
override_params_true = params_true.get_params_override()
override_params_false = params_false.get_params_override()
assert "verify" in override_params_true, (
"verify should be in params_override when explicitly set to True"
)
assert override_params_true["verify"] is True
assert "verify" in override_params_false, (
"verify should be in params_override when explicitly set to False"
)
assert override_params_false["verify"] is False
| TestAwsClientParameters |
python | keras-team__keras | keras/src/utils/file_utils_test.py | {
"start": 4342,
"end": 7835
} | class ____(test_case.TestCase):
def setUp(self):
self.base_dir = os.path.join(os.getcwd(), "temp_dir")
os.makedirs(self.base_dir, exist_ok=True)
self.tar_path = os.path.join(self.base_dir, "test.tar")
def tearDown(self):
os.remove(self.tar_path)
shutil.rmtree(self.base_dir)
def test_member_within_base_dir(self):
"""Test a member within the base directory."""
with tarfile.open(self.tar_path, "w") as tar:
tar.add(__file__, arcname="safe_path.txt")
with tarfile.open(self.tar_path, "r") as tar:
members = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
self.assertEqual(len(members), 1)
self.assertEqual(members[0].name, "safe_path.txt")
def test_symlink_within_base_dir(self):
"""Test a symlink pointing within the base directory."""
symlink_path = os.path.join(self.base_dir, "symlink.txt")
target_path = os.path.join(self.base_dir, "target.txt")
with open(target_path, "w") as f:
f.write("target")
os.symlink(target_path, symlink_path)
with tarfile.open(self.tar_path, "w") as tar:
tar.add(symlink_path, arcname="symlink.txt")
with tarfile.open(self.tar_path, "r") as tar:
members = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
self.assertEqual(len(members), 1)
self.assertEqual(members[0].name, "symlink.txt")
os.remove(symlink_path)
os.remove(target_path)
def test_invalid_path_warning(self):
"""Test warning for an invalid path during archive extraction."""
invalid_path = os.path.join(os.getcwd(), "invalid.txt")
with open(invalid_path, "w") as f:
f.write("invalid")
with tarfile.open(self.tar_path, "w") as tar:
tar.add(
invalid_path, arcname="../../invalid.txt"
) # Path intended to be outside of base dir
with tarfile.open(self.tar_path, "r") as tar:
with patch("warnings.warn") as mock_warn:
_ = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
warning_msg = (
"Skipping invalid path during archive extraction: "
"'../../invalid.txt'."
)
mock_warn.assert_called_with(warning_msg, stacklevel=2)
os.remove(invalid_path)
def test_symbolic_link_in_base_dir(self):
"""symbolic link within the base directory is correctly processed."""
symlink_path = os.path.join(self.base_dir, "symlink.txt")
target_path = os.path.join(self.base_dir, "target.txt")
# Create a target file and then a symbolic link pointing to it.
with open(target_path, "w") as f:
f.write("target")
os.symlink(target_path, symlink_path)
# Add the symbolic link to the tar archive.
with tarfile.open(self.tar_path, "w") as tar:
tar.add(symlink_path, arcname="symlink.txt")
with tarfile.open(self.tar_path, "r") as tar:
members = list(file_utils.filter_safe_tarinfos(tar.getmembers()))
self.assertEqual(len(members), 1)
self.assertEqual(members[0].name, "symlink.txt")
self.assertTrue(
members[0].issym()
) # Explicitly assert it's a symbolic link.
os.remove(symlink_path)
os.remove(target_path)
| FilterSafePathsTest |
python | doocs__leetcode | solution/1200-1299/1238.Circular Permutation in Binary Representation/Solution.py | {
"start": 0,
"end": 190
} | class ____:
def circularPermutation(self, n: int, start: int) -> List[int]:
g = [i ^ (i >> 1) for i in range(1 << n)]
j = g.index(start)
return g[j:] + g[:j]
| Solution |
python | coleifer__peewee | tests/postgres.py | {
"start": 1937,
"end": 2017
} | class ____(TestModel):
name = CharField()
duration = IntervalField()
| Event |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 39820,
"end": 40883
} | class ____(TestCase):
validator = None # wsgiref.validate.IteratorWrapper([]) does not have __len__
def application(self, environ, start_response):
path_bytes = b'/\xd0\xbf\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82'
if PY3:
# Under PY3, the escapes were decoded as latin-1
path_bytes = path_bytes.decode('latin-1')
self.assertEqual(environ['PATH_INFO'], path_bytes)
self.assertEqual(environ['QUERY_STRING'], '%D0%B2%D0%BE%D0%BF%D1%80%D0%BE%D1%81=%D0%BE%D1%82%D0%B2%D0%B5%D1%82')
start_response("200 PASSED", [('Content-Type', 'text/plain')])
return []
def test(self):
with self.connect() as sock:
sock.sendall(
b'''GET /%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82?%D0%B2%D0%BE%D0%BF%D1%80%D0%BE%D1%81=%D0%BE%D1%82%D0%B2%D0%B5%D1%82 HTTP/1.1
Host: localhost
Connection: close
'''.replace(b'\n', b'\r\n'))
with sock.makefile() as fd:
read_http(fd, reason='PASSED', chunks=False, body='', content_length=0)
| TestInternational |
python | google__pytype | pytype/constant_folding.py | {
"start": 3187,
"end": 3359
} | class ____:
"""A dictionary."""
key_types: frozenset[Any]
keys: tuple[Any, ...]
value_types: frozenset[Any]
values: tuple[Any, ...]
elements: dict[Any, Any]
| _Map |
python | geekcomputers__Python | Emoji Dictionary/emoji_dictionary.py | {
"start": 378,
"end": 8008
} | class ____(tk.Frame):
cells = [
["😀", "🥰", "😴", "🤓", "🤮", "🤬", "😨", "🤑", "😫", "😎"],
[
"🐒",
"🐕",
"🐎",
"🐪",
"🐁",
"🐘",
"🦘",
"🦈",
"🐓",
"🐝",
"👀",
"🦴",
"👩🏿",
"🤝",
"🧑",
"🏾",
"👱🏽",
"♀",
"🎞",
"🎨",
"⚽",
],
[
"🍕",
"🍗",
"🍜",
"☕",
"🍴",
"🍉",
"🍓",
"🌴",
"🌵",
"🛺",
"🚲",
"🛴",
"🚉",
"🚀",
"✈",
"🛰",
"🚦",
"🏳",
"🌈",
"🌎",
"🧭",
],
[
"🔥",
"❄",
"🌟",
"🌞",
"🌛",
"🌝",
"🌧",
"🧺",
"🧷",
"🪒",
"⛲",
"🗼",
"🕌",
"👁",
"🗨",
"💬",
"™",
"💯",
"🔕",
"💥",
"❤",
],
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target = None
self.memory = ""
for y, row in enumerate(self.cells):
for x, item in enumerate(row):
b = tk.Button(
self,
text=item,
command=lambda text=item: self.append(text),
font=("Arial", 14),
bg="yellow",
fg="blue",
borderwidth=3,
relief="raised",
)
b.grid(row=y, column=x, sticky="news")
x = tk.Button(
self,
text="Space",
command=self.space,
font=("Arial", 14),
bg="yellow",
fg="blue",
borderwidth=3,
relief="raised",
)
x.grid(row=0, column=10, columnspan="2", sticky="news")
x = tk.Button(
self,
text="tab",
command=self.tab,
font=("Arial", 14),
bg="yellow",
fg="blue",
borderwidth=3,
relief="raised",
)
x.grid(row=0, column=12, columnspan="2", sticky="news")
x = tk.Button(
self,
text="Backspace",
command=self.backspace,
font=("Arial", 14),
bg="yellow",
fg="blue",
borderwidth=3,
relief="raised",
)
x.grid(row=0, column=14, columnspan="3", sticky="news")
x = tk.Button(
self,
text="Clear",
command=self.clear,
font=("Arial", 14),
bg="yellow",
fg="blue",
borderwidth=3,
relief="raised",
)
x.grid(row=0, column=17, columnspan="2", sticky="news")
x = tk.Button(
self,
text="Hide",
command=self.hide,
font=("Arial", 14),
bg="yellow",
fg="blue",
borderwidth=3,
relief="raised",
)
x.grid(row=0, column=19, columnspan="2", sticky="news")
def get(self):
if self.target:
return self.target.get()
def append(self, text):
if self.target:
self.target.insert("end", text)
def clear(self):
if self.target:
self.target.delete(0, END)
def backspace(self):
if self.target:
text = self.get()
text = text[:-1]
self.clear()
self.append(text)
def space(self):
if self.target:
text = self.get()
text = text + " "
self.clear()
self.append(text)
def tab(self): # 5 spaces
if self.target:
text = self.get()
text = text + " "
self.clear()
self.append(text)
def copy(self):
# TODO: copy to clipboad
if self.target:
self.memory = self.get()
self.label["text"] = "memory: " + self.memory
print(self.memory)
def paste(self):
# TODO: copy from clipboad
if self.target:
self.append(self.memory)
def show(self, entry):
self.target = entry
self.place(relx=0.5, rely=0.6, anchor="c")
def hide(self):
self.target = None
self.place_forget()
# function defined th=o clear both the input text and output text --------------------------------------------------
def clear_text():
inputentry.delete(0, END)
outputtxt.delete("1.0", "end")
# function to search emoji
def search_emoji():
word = inputentry.get()
if word == "":
outputtxt.insert(END, "You have entered no emoji.")
else:
means = emoji.demojize(word)
outputtxt.insert(END, "Meaning of Emoji : " + str(word) + "\n\n" + means)
# main window created
window = tk.Tk()
window.title("Emoji Dictionary")
window.geometry("1000x700")
# for writing Dictionary label, at the top of window
dic = tk.Label(
text="EMOJI DICTIONARY", font=("Arial", 50, "underline"), fg="magenta"
) # same way bg
dic.place(x=160, y=10)
start1 = tk.Label(
text="Enter any Emoji you want to search...", font=("Arial", 30), fg="green"
) # same way bg
start1.place(x=160, y=120)
myname = StringVar(window)
firstclick1 = True
def on_inputentry_click(event):
"""function that gets called whenever entry1 is clicked"""
global firstclick1
if firstclick1: # if this is the first time they clicked it
firstclick1 = False
inputentry.delete(0, "end") # delete all the text in the entry
# Taking input from TextArea
# inputentry = Entry(window,font=("Arial", 35), width=33, border=2)
inputentry = Entry(
window, font=("Arial", 35), width=28, border=2, bg="light yellow", fg="brown"
)
inputentry.place(x=120, y=180)
# # Creating Search Button
Button(
window,
text="🔍 SEARCH",
command=search_emoji,
font=("Arial", 20),
bg="light green",
fg="blue",
borderwidth=3,
relief="raised",
).place(x=270, y=250)
# # creating clear button
Button(
window,
text="🧹 CLEAR",
command=clear_text,
font=("Arial", 20),
bg="orange",
fg="blue",
borderwidth=3,
relief="raised",
).place(x=545, y=250)
# meaning label
start1 = tk.Label(text="Meaning...", font=("Arial", 30), fg="green") # same way bg
start1.place(x=160, y=340)
# # Output TextBox Creation
outputtxt = tk.Text(
window,
height=7,
width=57,
font=("Arial", 17),
bg="light yellow",
fg="brown",
borderwidth=3,
relief="solid",
)
outputtxt.place(x=120, y=400)
# function for exiting
def exit_win():
if mbox.askokcancel("Exit", "Do you want to exit?"):
window.destroy()
# # creating exit button
Button(
window,
text="❌ EXIT",
command=exit_win,
font=("Arial", 20),
bg="red",
fg="black",
borderwidth=3,
relief="raised",
).place(x=435, y=610)
keypad = Keypad(window)
# # creating speech to text button
v_keypadb = Button(
window,
text="⌨",
command=lambda: keypad.show(inputentry),
font=("Arial", 18),
bg="light yellow",
fg="green",
borderwidth=3,
relief="raised",
).place(x=870, y=183)
window.protocol("WM_DELETE_WINDOW", exit_win)
window.mainloop()
| Keypad |
python | keon__algorithms | tests/test_strings.py | {
"start": 9884,
"end": 10450
} | class ____(unittest.TestCase):
"""[summary]
Test for the file reverse_string.py
Arguments:
unittest {[type]} -- [description]
"""
def test_recursive(self):
self.assertEqual("ereht olleh", recursive("hello there"))
def test_iterative(self):
self.assertEqual("ereht olleh", iterative("hello there"))
def test_pythonic(self):
self.assertEqual("ereht olleh", pythonic("hello there"))
def test_ultra_pythonic(self):
self.assertEqual("ereht olleh", ultra_pythonic("hello there"))
| TestReverseString |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 381581,
"end": 382290
} | class ____(sgqlc.types.Input):
"""Ways in which lists of workflow runs can be ordered upon return."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(WorkflowRunOrderField), graphql_name="field")
"""The field by which to order workflows."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The direction in which to order workflow runs by the specified
field.
"""
########################################################################
# Output Objects and Interfaces
########################################################################
| WorkflowRunOrder |
python | wandb__wandb | tests/unit_tests/test_launch/test_agent/test_config.py | {
"start": 267,
"end": 4143
} | class ____:
def __init__(self, api, config):
self.api = api
self.config = config
async def loop(*args, **kwargs):
pass
@pytest.fixture
def mock_agent(monkeypatch):
monkeypatch.setattr(
"wandb.sdk.launch._launch.LaunchAgent", lambda *args, **kwargs: MockAgent
)
@pytest.mark.parametrize(
"config, error",
[
# Valid configs
(
{
"entity": "test-entity",
},
False,
),
(
{
"entity": "test-entity",
"queues": ["test-queue"],
},
False,
),
(
{
"entity": "test-entity",
"queues": ["test-queue"],
"builder": {
"type": "docker",
},
"registry": {
"type": "ecr",
},
},
False,
),
(
{
"entity": "test-entity",
},
False,
),
# Registry type invalid.
(
{
"entity": "test-entity",
"queues": ["test-queue"],
"builder": {
"type": "docker",
},
"registry": {
"type": "ecrr",
},
},
True,
),
],
)
def test_create_and_run_agent(config, error, mock_agent):
if error:
with pytest.raises(LaunchError):
create_and_run_agent(MagicMock(), config)
else:
create_and_run_agent(MagicMock(), config)
@pytest.mark.parametrize(
"registry_uri, valid",
[
# Valid URIs
("https://123456789012.dkr.ecr.us-west-2.amazonaws.com/my-repo", True),
(
"https://123456789012.dkr.ecr.us-west-2.amazonaws.com/my-repo.withdash-dot/andslash",
True,
),
("https://myregistry.azurecr.io/my-repo", True),
("https://us-central1-docker.pkg.dev/my-project/my-repo/my-image", True),
("https://myregistry.com/my-repo", True),
# Invalid URIs
("https://123456789012.dkr.ecr.us-west-2.amazonaws.com/my-repo:tag", False),
(
"https://123456789012.dkr.ecr.us-west-2.amazonaws.com/my-repo.withdash-dot/andslash:tag",
False,
),
("https://myregistry.azurecr.io/my-repo:tag", False),
("https://us-central1-docker.pkg.dev/my-project/my-repo/my-image:tag", False),
("https://us-central1-docker.pkg.dev/my-project/my-repo", False),
],
)
def test_validate_registry_uri(registry_uri, valid):
"""Test that we validated the registry URI correctly."""
if not valid:
with pytest.raises(ValueError):
validate_registry_uri(registry_uri)
else:
validate_registry_uri(registry_uri)
def test_resolve_agent_config(monkeypatch, runner):
monkeypatch.setattr(
"wandb.sdk.launch._launch.LAUNCH_CONFIG_FILE",
"./config/wandb/launch-config.yaml",
)
monkeypatch.setenv("WANDB_ENTITY", "diffentity")
with runner.isolated_filesystem():
os.makedirs("./config/wandb")
with open("./config/wandb/launch-config.yaml", "w") as f:
yaml.dump(
{
"entity": "different-entity",
"max_jobs": 2,
"registry": {"url": "test"},
},
f,
)
config, returned_api = resolve_agent_config(
entity=None,
max_jobs=-1,
queues=["diff-queue"],
config=None,
verbosity=None,
)
assert config["registry"] == {"url": "test"}
assert config["entity"] == "diffentity"
assert config["max_jobs"] == -1
| MockAgent |
python | google__jax | tests/pallas/tpu_pallas_async_test.py | {
"start": 31597,
"end": 33494
} | class ____(parameterized.TestCase):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu_at_least(4):
self.skipTest('DMAs only guaranteed to work ou TPU v4+')
def test_basic_stateful_async_copy(self):
@jax.jit
def f(x):
y = jnp.zeros_like(x)
def body(refs):
copy_start, copy_done = make_stateful_async_copy()
x_ref, y_ref = refs
fut = copy_start(x_ref, y_ref)
copy_done(x_ref, y_ref, fut)
_, y = state_discharge.run_state(body)((x, y))
return y
x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32)
y = f(x)
np.testing.assert_array_equal(y, x)
def test_multiple_stateful_async_copy(self):
@jax.jit
def f(x):
y = y2 = jnp.zeros_like(x)
def body(refs):
copy_start, copy_done = make_stateful_async_copy()
x_ref, y_ref, y2_ref = refs
fut = copy_start(x_ref, y_ref)
fut2 = copy_start(x_ref, y2_ref)
copy_done(x_ref, y_ref, fut)
copy_done(x_ref, y2_ref, fut2)
_, y, y2 = state_discharge.run_state(body)((x, y, y2))
return y, y2
x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32)
y, y2 = f(x)
np.testing.assert_array_equal(y, x)
np.testing.assert_array_equal(y2, x)
def test_basic_stateful_async_slice(self):
@jax.jit
def f(x):
y = jnp.zeros(x.shape[1:], x.dtype)
def body(refs):
copy_start, copy_done = make_stateful_async_slice(2)
x_ref, y_ref = refs
fut = copy_start(x_ref, y_ref)
copy_done(x_ref, y_ref, fut)
_, y = state_discharge.run_state(body)((x, y))
return y
x = jax.random.normal(jax.random.key(0), (4, 8, 128), dtype=jnp.float32)
y = f(x)
np.testing.assert_array_equal(y, x[2])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| PallasCallStatefulAsyncTest |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 6095,
"end": 6308
} | class ____(GISLookup):
"""
The 'bboverlaps' operator returns true if A's bounding box overlaps B's
bounding box.
"""
lookup_name = "bboverlaps"
@BaseSpatialField.register_lookup
| BBOverlapsLookup |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 28205,
"end": 39973
} | class ____(ColumnCollectionCommon, fixtures.TestBase):
def _column_collection(self, columns=None):
return DedupeColumnCollection(columns=columns)
def test_separate_key_cols(self):
c1, c2 = sql.column("col1"), sql.column("col2")
assert_raises_message(
exc.ArgumentError,
"DedupeColumnCollection requires columns be under "
"the same key as their .key",
self._column_collection,
[("kcol1", c1), ("kcol2", c2)],
)
cc = self._column_collection()
assert_raises_message(
exc.ArgumentError,
"DedupeColumnCollection requires columns be under "
"the same key as their .key",
cc.add,
c1,
"kcol1",
)
def test_pickle_w_mutation(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(columns=[("c1", c1), ("foo", c2)])
ci = cc.as_readonly()
d = {"cc": cc, "ci": ci}
for loads, dumps in picklers():
dp = loads(dumps(d))
cp = dp["cc"]
cpi = dp["ci"]
self._assert_collection_integrity(cp)
self._assert_collection_integrity(cpi)
assert cp._colset is cpi._colset
assert cp._index is cpi._index
assert cp._collection is cpi._collection
cp.add(c3)
eq_(cp.keys(), ["c1", "foo", "c3"])
eq_(cpi.keys(), ["c1", "foo", "c3"])
assert cp.contains_column(c3)
assert cpi.contains_column(c3)
def test_keys_after_replace(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(
columns=[("c1", c1), ("foo", c2), ("c3", c3)]
)
eq_(cc.keys(), ["c1", "foo", "c3"])
c4 = sql.column("c3")
cc.replace(c4)
eq_(cc.keys(), ["c1", "foo", "c3"])
self._assert_collection_integrity(cc)
def test_dupes_add_dedupe(self):
cc = DedupeColumnCollection()
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("c2"),
)
cc.add(c1)
cc.add(c2a)
cc.add(c3)
cc.add(c2b)
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
self._assert_collection_integrity(cc)
def test_dupes_construct_dedupe(self):
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("c2"),
)
cc = DedupeColumnCollection(
columns=[("c1", c1), ("c2", c2a), ("c3", c3), ("c2", c2b)]
)
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
self._assert_collection_integrity(cc)
def test_identical_dupe_add_dedupes(self):
cc = DedupeColumnCollection()
c1, c2, c3 = (column("c1"), column("c2"), column("c3"))
cc.add(c1)
cc.add(c2)
cc.add(c3)
cc.add(c2)
eq_(cc._all_columns, [c1, c2, c3])
# for iter, c2a is replaced by c2b, ordering
# is maintained in that way. ideally, iter would be
# the same as the "_all_columns" collection.
eq_(list(cc), [c1, c2, c3])
assert cc.contains_column(c2)
self._assert_collection_integrity(cc)
ci = cc.as_readonly()
eq_(ci._all_columns, [c1, c2, c3])
eq_(list(ci), [c1, c2, c3])
def test_identical_dupe_construct_dedupes(self):
c1, c2, c3 = (column("c1"), column("c2"), column("c3"))
cc = DedupeColumnCollection(
columns=[("c1", c1), ("c2", c2), ("c3", c3), ("c2", c2)]
)
eq_(cc._all_columns, [c1, c2, c3])
# for iter, c2a is replaced by c2b, ordering
# is maintained in that way. ideally, iter would be
# the same as the "_all_columns" collection.
eq_(list(cc), [c1, c2, c3])
assert cc.contains_column(c2)
self._assert_collection_integrity(cc)
ci = cc.as_readonly()
eq_(ci._all_columns, [c1, c2, c3])
eq_(list(ci), [c1, c2, c3])
def test_replace(self):
cc = DedupeColumnCollection()
ci = cc.as_readonly()
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("c2"),
)
cc.add(c1)
cc.add(c2a)
cc.add(c3)
cc.replace(c2b)
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
is_(cc[1], c2b)
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
self._assert_collection_integrity(cc)
eq_(ci._all_columns, [c1, c2b, c3])
eq_(list(ci), [c1, c2b, c3])
is_(ci[1], c2b)
def test_replace_key_matches_name_of_another(self):
cc = DedupeColumnCollection()
ci = cc.as_readonly()
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("c4"),
)
c2b.key = "c2"
cc.add(c1)
cc.add(c2a)
cc.add(c3)
cc.replace(c2b)
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
is_(cc[1], c2b)
self._assert_collection_integrity(cc)
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
eq_(ci._all_columns, [c1, c2b, c3])
eq_(list(ci), [c1, c2b, c3])
is_(ci[1], c2b)
def test_replace_key_matches(self):
cc = DedupeColumnCollection()
ci = cc.as_readonly()
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("X"),
)
c2b.key = "c2"
cc.add(c1)
cc.add(c2a)
cc.add(c3)
cc.replace(c2b)
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
is_(cc[1], c2b)
assert_raises(IndexError, lambda: cc[3])
self._assert_collection_integrity(cc)
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
eq_(ci._all_columns, [c1, c2b, c3])
eq_(list(ci), [c1, c2b, c3])
is_(ci[1], c2b)
assert_raises(IndexError, lambda: ci[3])
def test_replace_name_matches(self):
cc = DedupeColumnCollection()
ci = cc.as_readonly()
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("c2"),
)
c2b.key = "X"
cc.add(c1)
cc.add(c2a)
cc.add(c3)
cc.replace(c2b)
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
eq_(len(cc), 3)
is_(cc[1], c2b)
self._assert_collection_integrity(cc)
eq_(ci._all_columns, [c1, c2b, c3])
eq_(list(ci), [c1, c2b, c3])
eq_(len(ci), 3)
is_(ci[1], c2b)
def test_replace_no_match(self):
cc = DedupeColumnCollection()
ci = cc.as_readonly()
c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4")
c4.key = "X"
cc.add(c1)
cc.add(c2)
cc.add(c3)
cc.replace(c4)
assert cc.contains_column(c2)
assert cc.contains_column(c4)
eq_(cc._all_columns, [c1, c2, c3, c4])
eq_(list(cc), [c1, c2, c3, c4])
is_(cc[3], c4)
self._assert_collection_integrity(cc)
eq_(ci._all_columns, [c1, c2, c3, c4])
eq_(list(ci), [c1, c2, c3, c4])
is_(ci[3], c4)
def test_replace_switch_key_name(self):
c1 = column("id")
c2 = column("street")
c3 = column("user_id")
cc = DedupeColumnCollection(
columns=[("id", c1), ("street", c2), ("user_id", c3)]
)
# for replace col with different key than name, it necessarily
# removes two columns
c4 = column("id")
c4.key = "street"
cc.replace(c4)
eq_(list(cc), [c4, c3])
self._assert_collection_integrity(cc)
def test_remove(self):
c1, c2, c3 = column("c1"), column("c2"), column("c3")
cc = DedupeColumnCollection(
columns=[("c1", c1), ("c2", c2), ("c3", c3)]
)
ci = cc.as_readonly()
eq_(cc._all_columns, [c1, c2, c3])
eq_(list(cc), [c1, c2, c3])
assert cc.contains_column(c2)
assert "c2" in cc
eq_(ci._all_columns, [c1, c2, c3])
eq_(list(ci), [c1, c2, c3])
assert ci.contains_column(c2)
assert "c2" in ci
cc.remove(c2)
eq_(cc._all_columns, [c1, c3])
eq_(list(cc), [c1, c3])
is_(cc[0], c1)
is_(cc[1], c3)
assert not cc.contains_column(c2)
assert "c2" not in cc
self._assert_collection_integrity(cc)
eq_(ci._all_columns, [c1, c3])
eq_(list(ci), [c1, c3])
is_(ci[0], c1)
is_(ci[1], c3)
assert not ci.contains_column(c2)
assert "c2" not in ci
assert_raises(IndexError, lambda: ci[2])
def test_remove_doesnt_change_iteration(self):
c1, c2, c3, c4, c5 = (
column("c1"),
column("c2"),
column("c3"),
column("c4"),
column("c5"),
)
cc = DedupeColumnCollection(
columns=[
("c1", c1),
("c2", c2),
("c3", c3),
("c4", c4),
("c5", c5),
]
)
for col in cc:
if col.name not in ["c1", "c2"]:
cc.remove(col)
eq_(cc.keys(), ["c1", "c2"])
eq_([c.name for c in cc], ["c1", "c2"])
self._assert_collection_integrity(cc)
def test_dupes_extend(self):
cc = DedupeColumnCollection()
ci = cc.as_readonly()
c1, c2a, c3, c2b = (
column("c1"),
column("c2"),
column("c3"),
column("c2"),
)
cc.add(c1)
cc.add(c2a)
cc.extend([c3, c2b]) # this should remove c2a
eq_(cc._all_columns, [c1, c2b, c3])
eq_(list(cc), [c1, c2b, c3])
is_(cc[1], c2b)
is_(cc[2], c3)
assert_raises(IndexError, lambda: cc[3])
self._assert_collection_integrity(cc)
assert not cc.contains_column(c2a)
assert cc.contains_column(c2b)
eq_(ci._all_columns, [c1, c2b, c3])
eq_(list(ci), [c1, c2b, c3])
is_(ci[1], c2b)
is_(ci[2], c3)
assert_raises(IndexError, lambda: ci[3])
assert not ci.contains_column(c2a)
assert ci.contains_column(c2b)
def test_extend_existing_maintains_ordering(self):
cc = DedupeColumnCollection()
c1, c2, c3, c4, c5 = (
column("c1"),
column("c2"),
column("c3"),
column("c4"),
column("c5"),
)
cc.extend([c1, c2])
eq_(cc._all_columns, [c1, c2])
self._assert_collection_integrity(cc)
cc.extend([c3])
eq_(cc._all_columns, [c1, c2, c3])
self._assert_collection_integrity(cc)
cc.extend([c4, c2, c5])
eq_(cc._all_columns, [c1, c2, c3, c4, c5])
self._assert_collection_integrity(cc)
| DedupeColumnCollectionTest |
python | readthedocs__readthedocs.org | readthedocs/config/models.py | {
"start": 1978,
"end": 2120
} | class ____(ConfigBaseModel):
path: str
method: Literal["pip", "setuptools"] = "pip"
extra_requirements: list[str] = []
| PythonInstall |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/benchmarks/utils.py | {
"start": 6262,
"end": 45936
} | class ____:
"""Results for a PDS-H or PDS-DS query run."""
queries: list[int]
suffix: str
executor: ExecutorType
runtime: str
stream_policy: str | None
cluster: str
scheduler: str # Deprecated, kept for backward compatibility
n_workers: int
versions: PackageVersions = dataclasses.field(
default_factory=PackageVersions.collect
)
records: dict[int, list[Record]] = dataclasses.field(default_factory=dict)
dataset_path: Path
scale_factor: int | float
shuffle: Literal["rapidsmpf", "tasks"] | None = None
gather_shuffle_stats: bool = False
broadcast_join_limit: int | None = None
blocksize: int | None = None
max_rows_per_partition: int | None = None
threads: int
iterations: int
timestamp: str = dataclasses.field(
default_factory=lambda: datetime.now(timezone.utc).isoformat()
)
hardware: HardwareInfo = dataclasses.field(default_factory=HardwareInfo.collect)
rmm_async: bool
rapidsmpf_oom_protection: bool
rapidsmpf_spill: bool
spill_device: float
query_set: str
collect_traces: bool = False
stats_planning: bool
max_io_threads: int
def __post_init__(self) -> None: # noqa: D105
if self.gather_shuffle_stats and self.shuffle != "rapidsmpf":
raise ValueError(
"gather_shuffle_stats is only supported when shuffle='rapidsmpf'."
)
@classmethod
def from_args(cls, args: argparse.Namespace) -> RunConfig:
"""Create a RunConfig from command line arguments."""
executor: ExecutorType = args.executor
cluster = args.cluster
scheduler = args.scheduler
runtime = args.runtime
stream_policy = args.stream_policy
# Handle "auto" stream policy
if stream_policy == "auto":
stream_policy = None
# Deal with deprecated scheduler argument
# and non-streaming executors
if executor == "in-memory" or executor == "cpu":
cluster = None
scheduler = None
elif scheduler is not None:
if cluster is not None:
raise ValueError(
"Cannot specify both -s/--scheduler and -c/--cluster. "
"Please use -c/--cluster only."
)
else:
warnings.warn(
"The -s/--scheduler argument is deprecated. Use -c/--cluster instead.",
FutureWarning,
stacklevel=2,
)
cluster = "single" if scheduler == "synchronous" else "distributed"
elif cluster is not None:
scheduler = "synchronous" if cluster == "single" else "distributed"
else:
cluster = "single"
scheduler = "synchronous"
path = args.path
name = args.query_set
scale_factor = args.scale
if scale_factor is None:
if "pdsds" in name:
raise ValueError(
"--scale is required for PDS-DS benchmarks.\n"
"TODO: This will be inferred once we maintain a map of scale factors to row counts."
)
if path is None:
raise ValueError(
"Must specify --root and --scale if --path is not specified."
)
# For PDS-H, infer scale factor based on row count
scale_factor = _infer_scale_factor(name, path, args.suffix)
if path is None:
path = f"{args.root}/scale-{scale_factor}"
scale_factor = float(scale_factor)
try:
scale_factor_int = int(scale_factor)
except ValueError:
pass
else:
if scale_factor_int == scale_factor:
scale_factor = scale_factor_int
skip_scale_factor_inference = (
"LIBCUDF_IO_REROUTE_LOCAL_DIR_PATTERN" in os.environ
) and ("LIBCUDF_IO_REROUTE_REMOTE_DIR_PATTERN" in os.environ)
if (
"pdsh" in name
and args.scale is not None
and skip_scale_factor_inference is False
):
# Validate the user-supplied scale factor
sf_inf = _infer_scale_factor(name, path, args.suffix)
rel_error = abs((scale_factor - sf_inf) / sf_inf)
if rel_error > 0.01:
raise ValueError(
f"Specified scale factor is {args.scale}, "
f"but the inferred scale factor is {sf_inf}."
)
return cls(
queries=args.query,
executor=executor,
cluster=cluster,
scheduler=scheduler,
runtime=runtime,
stream_policy=stream_policy,
n_workers=args.n_workers,
shuffle=args.shuffle,
gather_shuffle_stats=args.rapidsmpf_dask_statistics,
broadcast_join_limit=args.broadcast_join_limit,
dataset_path=path,
scale_factor=scale_factor,
blocksize=args.blocksize,
threads=args.threads,
iterations=args.iterations,
suffix=args.suffix,
rmm_async=args.rmm_async,
rapidsmpf_oom_protection=args.rapidsmpf_oom_protection,
spill_device=args.spill_device,
rapidsmpf_spill=args.rapidsmpf_spill,
max_rows_per_partition=args.max_rows_per_partition,
query_set=args.query_set,
collect_traces=args.collect_traces,
stats_planning=args.stats_planning,
max_io_threads=args.max_io_threads,
)
def serialize(self, engine: pl.GPUEngine | None) -> dict:
"""Serialize the run config to a dictionary."""
result = dataclasses.asdict(self)
if engine is not None:
config_options = ConfigOptions.from_polars_engine(engine)
result["config_options"] = dataclasses.asdict(config_options)
return result
def summarize(self) -> None:
"""Print a summary of the results."""
print("Iteration Summary")
print("=======================================")
for query, records in self.records.items():
print(f"query: {query}")
print(f"path: {self.dataset_path}")
print(f"scale_factor: {self.scale_factor}")
print(f"executor: {self.executor}")
print(f"stream_policy: {self.stream_policy}")
if self.executor == "streaming":
print(f"runtime: {self.runtime}")
print(f"cluster: {self.cluster}")
print(f"blocksize: {self.blocksize}")
print(f"shuffle_method: {self.shuffle}")
print(f"broadcast_join_limit: {self.broadcast_join_limit}")
print(f"stats_planning: {self.stats_planning}")
if self.cluster == "distributed":
print(f"n_workers: {self.n_workers}")
print(f"threads: {self.threads}")
print(f"rmm_async: {self.rmm_async}")
print(f"rapidsmpf_oom_protection: {self.rapidsmpf_oom_protection}")
print(f"spill_device: {self.spill_device}")
print(f"rapidsmpf_spill: {self.rapidsmpf_spill}")
if len(records) > 0:
print(f"iterations: {self.iterations}")
print("---------------------------------------")
print(f"min time : {min(record.duration for record in records):0.4f}")
print(f"max time : {max(record.duration for record in records):0.4f}")
print(
f"mean time: {statistics.mean(record.duration for record in records):0.4f}"
)
print("=======================================")
total_mean_time = sum(
statistics.mean(record.duration for record in records)
for records in self.records.values()
if records
)
print(f"Total mean time across all queries: {total_mean_time:.4f} seconds")
def get_data(path: str | Path, table_name: str, suffix: str = "") -> pl.LazyFrame:
"""Get table from dataset."""
return pl.scan_parquet(f"{path}/{table_name}{suffix}")
def get_executor_options(
run_config: RunConfig, benchmark: Any = None
) -> dict[str, Any]:
"""Generate executor_options for GPUEngine."""
executor_options: dict[str, Any] = {}
if run_config.executor == "streaming":
if run_config.blocksize:
executor_options["target_partition_size"] = run_config.blocksize
if run_config.max_rows_per_partition:
executor_options["max_rows_per_partition"] = (
run_config.max_rows_per_partition
)
if run_config.shuffle:
executor_options["shuffle_method"] = run_config.shuffle
if run_config.broadcast_join_limit:
executor_options["broadcast_join_limit"] = run_config.broadcast_join_limit
if run_config.rapidsmpf_spill:
executor_options["rapidsmpf_spill"] = run_config.rapidsmpf_spill
if run_config.cluster == "distributed":
executor_options["cluster"] = "distributed"
executor_options["stats_planning"] = {
"use_reduction_planning": run_config.stats_planning,
"use_sampling": (
# Always allow row-group sampling for rapidsmpf runtime
run_config.stats_planning or run_config.runtime == "rapidsmpf"
),
}
executor_options["client_device_threshold"] = run_config.spill_device
executor_options["runtime"] = run_config.runtime
executor_options["max_io_threads"] = run_config.max_io_threads
if (
benchmark
and benchmark.__name__ == "PDSHQueries"
and run_config.executor == "streaming"
# Only use the unique_fraction config if stats_planning is disabled
and not run_config.stats_planning
):
executor_options["unique_fraction"] = {
"c_custkey": 0.05,
"l_orderkey": 1.0,
"l_partkey": 0.1,
"o_custkey": 0.25,
}
return executor_options
def print_query_plan(
q_id: int,
q: pl.LazyFrame,
args: argparse.Namespace,
run_config: RunConfig,
engine: None | pl.GPUEngine = None,
) -> None:
"""Print the query plan."""
if run_config.executor == "cpu":
if args.explain_logical:
print(f"\nQuery {q_id} - Logical plan\n")
print(q.explain())
if args.explain:
print(f"\nQuery {q_id} - Physical plan\n")
print(q.show_graph(engine="streaming", plan_stage="physical"))
elif CUDF_POLARS_AVAILABLE:
assert isinstance(engine, pl.GPUEngine)
if args.explain_logical:
print(f"\nQuery {q_id} - Logical plan\n")
print(explain_query(q, engine, physical=False))
if args.explain and run_config.executor == "streaming":
print(f"\nQuery {q_id} - Physical plan\n")
print(explain_query(q, engine))
else:
raise RuntimeError(
"Cannot provide the logical or physical plan because cudf_polars is not installed."
)
def initialize_dask_cluster(run_config: RunConfig, args: argparse.Namespace): # type: ignore[no-untyped-def]
"""Initialize a Dask distributed cluster."""
if run_config.cluster != "distributed":
return None
from dask_cuda import LocalCUDACluster
from distributed import Client
kwargs = {
"n_workers": run_config.n_workers,
"dashboard_address": ":8585",
"protocol": args.protocol,
"rmm_pool_size": args.rmm_pool_size,
"rmm_async": args.rmm_async,
"rmm_release_threshold": args.rmm_release_threshold,
"threads_per_worker": run_config.threads,
}
# Avoid UVM in distributed cluster
client = Client(LocalCUDACluster(**kwargs))
client.wait_for_workers(run_config.n_workers)
if run_config.shuffle != "tasks":
try:
from rapidsmpf.config import Options
from rapidsmpf.integrations.dask import bootstrap_dask_cluster
bootstrap_dask_cluster(
client,
options=Options(
{
"dask_spill_device": str(run_config.spill_device),
"dask_statistics": str(args.rapidsmpf_dask_statistics),
"dask_print_statistics": str(args.rapidsmpf_print_statistics),
"oom_protection": str(args.rapidsmpf_oom_protection),
}
),
)
# Setting this globally makes the peak statistics not meaningful
# across queries / iterations. But doing it per query isn't worth
# the effort right now.
client.run(rmm.statistics.enable_statistics)
except ImportError as err:
if run_config.shuffle == "rapidsmpf":
raise ImportError(
"rapidsmpf is required for shuffle='rapidsmpf' but is not installed."
) from err
return client
def execute_query(
q_id: int,
i: int,
q: pl.LazyFrame,
run_config: RunConfig,
args: argparse.Namespace,
engine: None | pl.GPUEngine = None,
) -> pl.DataFrame:
"""Execute a query with NVTX annotation."""
with nvtx.annotate(
message=f"Query {q_id} - Iteration {i}",
domain="cudf_polars",
color="green",
):
if run_config.executor == "cpu":
return q.collect(engine="streaming")
elif CUDF_POLARS_AVAILABLE:
assert isinstance(engine, pl.GPUEngine)
if args.debug:
translator = Translator(q._ldf.visit(), engine)
ir = translator.translate_ir()
context = IRExecutionContext.from_config_options(
translator.config_options
)
if run_config.executor == "in-memory":
return ir.evaluate(
cache={}, timer=None, context=context
).to_polars()
elif run_config.executor == "streaming":
return evaluate_streaming(
ir,
translator.config_options,
)
assert_never(run_config.executor)
else:
return q.collect(engine=engine)
else:
raise RuntimeError("The requested engine is not supported.")
def _query_type(num_queries: int) -> Callable[[str | int], list[int]]:
def parse(query: str | int) -> list[int]:
if isinstance(query, int):
return [query]
if query == "all":
return list(range(1, num_queries + 1))
result: set[int] = set()
for part in query.split(","):
if "-" in part:
start, end = part.split("-")
result.update(range(int(start), int(end) + 1))
else:
result.add(int(part))
return sorted(result)
return parse
def parse_args(
args: Sequence[str] | None = None, num_queries: int = 22
) -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
prog="Cudf-Polars PDS-H Benchmarks",
description="Experimental streaming-executor benchmarks.",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"query",
type=_query_type(num_queries),
help=textwrap.dedent("""\
Query to run. One of the following:
- A single number (e.g. 11)
- A comma-separated list of query numbers (e.g. 1,3,7)
- A range of query number (e.g. 1-11,23-34)
- The string 'all' to run all queries (1 through 22)"""),
)
parser.add_argument(
"--path",
type=str,
default=os.environ.get("PDSH_DATASET_PATH"),
help=textwrap.dedent("""\
Path to the root directory of the PDS-H dataset.
Defaults to the PDSH_DATASET_PATH environment variable."""),
)
parser.add_argument(
"--root",
type=str,
default=os.environ.get("PDSH_DATASET_ROOT"),
help="Root PDS-H dataset directory (ignored if --path is used).",
)
parser.add_argument(
"--scale",
type=str,
default=None,
help="Dataset scale factor.",
)
parser.add_argument(
"--suffix",
type=str,
default=".parquet",
help=textwrap.dedent("""\
File suffix for input table files.
Default: .parquet"""),
)
parser.add_argument(
"-e",
"--executor",
default="streaming",
type=str,
choices=["in-memory", "streaming", "cpu"],
help=textwrap.dedent("""\
Query executor backend:
- in-memory : Evaluate query in GPU memory
- streaming : Partitioned evaluation (default)
- cpu : Use Polars CPU engine"""),
)
parser.add_argument(
"-c",
"--cluster",
default=None,
type=str,
choices=["single", "distributed"],
help=textwrap.dedent("""\
Cluster type to use with the 'streaming' executor.
- single : Run locally in a single process
- distributed : Use Dask for multi-GPU execution"""),
)
parser.add_argument(
"-s",
"--scheduler",
default=None,
type=str,
choices=["synchronous", "distributed"],
help=textwrap.dedent("""\
*Deprecated*: Use --cluster instead.
Scheduler type to use with the 'streaming' executor.
- synchronous : Run locally in a single process
- distributed : Use Dask for multi-GPU execution"""),
)
parser.add_argument(
"--runtime",
type=str,
choices=["tasks", "rapidsmpf"],
default="tasks",
help="Runtime to use for the streaming executor (tasks or rapidsmpf).",
)
parser.add_argument(
"--stream-policy",
type=str,
choices=["auto", "default", "new", "pool"],
default="auto",
help=textwrap.dedent("""\
CUDA stream policy (auto, default, new, pool).
Default: auto (use the default policy for the runtime)"""),
)
parser.add_argument(
"--n-workers",
default=1,
type=int,
help="Number of Dask-CUDA workers (requires 'distributed' cluster).",
)
parser.add_argument(
"--blocksize",
default=None,
type=int,
help="Target partition size, in bytes, for IO tasks.",
)
parser.add_argument(
"--max-rows-per-partition",
default=None,
type=int,
help="The maximum number of rows to process per partition.",
)
parser.add_argument(
"--iterations",
default=1,
type=int,
help="Number of times to run the same query.",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="Debug run.",
)
parser.add_argument(
"--protocol",
default="ucx",
type=str,
choices=["ucx"],
help="Communication protocol to use for Dask: ucx (uses ucxx)",
)
parser.add_argument(
"--shuffle",
default=None,
type=str,
choices=[None, "rapidsmpf", "tasks"],
help="Shuffle method to use for distributed execution.",
)
parser.add_argument(
"--broadcast-join-limit",
default=None,
type=int,
help="Set an explicit `broadcast_join_limit` option.",
)
parser.add_argument(
"--threads",
default=1,
type=int,
help="Number of threads to use on each GPU.",
)
parser.add_argument(
"--rmm-pool-size",
default=None,
type=float,
help=textwrap.dedent("""\
Fraction of total GPU memory to allocate for RMM pool.
Default: 0.5 (50%% of GPU memory) when --no-rmm-async,
None when --rmm-async"""),
)
parser.add_argument(
"--rmm-release-threshold",
default=None,
type=float,
help=textwrap.dedent("""\
Passed to dask_cuda.LocalCUDACluster to control the release
threshold for RMM pool memory.
Default: None (no release threshold)"""),
)
parser.add_argument(
"--rmm-async",
action=argparse.BooleanOptionalAction,
default=False,
help="Use RMM async memory resource. Note: only affects distributed cluster!",
)
parser.add_argument(
"--rapidsmpf-oom-protection",
action=argparse.BooleanOptionalAction,
default=False,
help="Use rapidsmpf CUDA managed memory-based OOM protection.",
)
parser.add_argument(
"--rapidsmpf-dask-statistics",
action=argparse.BooleanOptionalAction,
default=False,
help="Collect rapidsmpf shuffle statistics. The output will be stored in the 'shuffle_stats' field of each record.",
)
parser.add_argument(
"--rapidsmpf-print-statistics",
action=argparse.BooleanOptionalAction,
default=False,
help="Print rapidsmpf shuffle statistics on each Dask worker upon completion.",
)
parser.add_argument(
"--rapidsmpf-spill",
action=argparse.BooleanOptionalAction,
default=False,
help="Use rapidsmpf for general spilling.",
)
parser.add_argument(
"--spill-device",
default=0.5,
type=float,
help="Rapidsmpf device spill threshold.",
)
parser.add_argument(
"-o",
"--output",
type=argparse.FileType("at"),
default="pdsh_results.jsonl",
help="Output file path.",
)
parser.add_argument(
"--summarize",
action=argparse.BooleanOptionalAction,
help="Summarize the results.",
default=True,
)
parser.add_argument(
"--print-results",
action=argparse.BooleanOptionalAction,
help="Print the query results",
default=True,
)
parser.add_argument(
"--explain",
action=argparse.BooleanOptionalAction,
help="Print an outline of the physical plan",
default=False,
)
parser.add_argument(
"--explain-logical",
action=argparse.BooleanOptionalAction,
help="Print an outline of the logical plan",
default=False,
)
parser.add_argument(
"--validate",
action=argparse.BooleanOptionalAction,
default=False,
help="Validate the result against CPU execution.",
)
parser.add_argument(
"--baseline",
choices=["duckdb", "cpu"],
default="duckdb",
help="Which engine to use as the baseline for validation.",
)
parser.add_argument(
"--collect-traces",
action=argparse.BooleanOptionalAction,
default=False,
help="Collect data tracing cudf-polars execution.",
)
parser.add_argument(
"--stats-planning",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable statistics planning.",
)
parser.add_argument(
"--max-io-threads",
default=2,
type=int,
help="Maximum number of IO threads for rapidsmpf runtime.",
)
parsed_args = parser.parse_args(args)
if parsed_args.rmm_pool_size is None and not parsed_args.rmm_async:
# The default rmm pool size depends on the rmm_async flag
parsed_args.rmm_pool_size = 0.5
return parsed_args
def run_polars(
benchmark: Any,
options: Sequence[str] | None = None,
num_queries: int = 22,
) -> None:
"""Run the queries using the given benchmark and executor options."""
args = parse_args(options, num_queries=num_queries)
vars(args).update({"query_set": benchmark.name})
run_config = RunConfig.from_args(args)
validation_failures: list[int] = []
query_failures: list[tuple[int, int]] = []
client = initialize_dask_cluster(run_config, args)
records: defaultdict[int, list[Record]] = defaultdict(list)
engine: pl.GPUEngine | None = None
if run_config.executor != "cpu":
executor_options = get_executor_options(run_config, benchmark=benchmark)
engine = pl.GPUEngine(
raise_on_fail=True,
memory_resource=rmm.mr.CudaAsyncMemoryResource()
if run_config.rmm_async
else None,
cuda_stream_policy=run_config.stream_policy,
executor=run_config.executor,
executor_options=executor_options,
)
for q_id in run_config.queries:
try:
q = getattr(benchmark, f"q{q_id}")(run_config)
except AttributeError as err:
raise NotImplementedError(f"Query {q_id} not implemented.") from err
print_query_plan(q_id, q, args, run_config, engine)
records[q_id] = []
for i in range(args.iterations):
if _HAS_STRUCTLOG and run_config.collect_traces:
setup_logging(q_id, i)
if client is not None:
client.run(setup_logging, q_id, i)
t0 = time.monotonic()
try:
result = execute_query(q_id, i, q, run_config, args, engine)
except Exception:
print(f"❌ query={q_id} iteration={i} failed!")
print(traceback.format_exc())
query_failures.append((q_id, i))
continue
if run_config.shuffle == "rapidsmpf" and run_config.gather_shuffle_stats:
from rapidsmpf.integrations.dask.shuffler import (
clear_shuffle_statistics,
gather_shuffle_statistics,
)
shuffle_stats = gather_shuffle_statistics(client)
clear_shuffle_statistics(client)
else:
shuffle_stats = None
if args.validate and run_config.executor != "cpu":
try:
assert_gpu_result_equal(
q,
engine=engine,
executor=run_config.executor,
check_exact=False,
)
print(f"✅ Query {q_id} passed validation!")
except AssertionError as e:
validation_failures.append(q_id)
print(f"❌ Query {q_id} failed validation!\n{e}")
t1 = time.monotonic()
record = Record(
query=q_id, iteration=i, duration=t1 - t0, shuffle_stats=shuffle_stats
)
if args.print_results:
print(result)
print(
f"Query {q_id} - Iteration {i} finished in {record.duration:0.4f}s",
flush=True,
)
records[q_id].append(record)
run_config = dataclasses.replace(run_config, records=dict(records))
# consolidate logs
if _HAS_STRUCTLOG and run_config.collect_traces:
def gather_logs() -> str:
logger = logging.getLogger()
return logger.handlers[0].stream.getvalue() # type: ignore[attr-defined]
if client is not None:
all_logs = "\n".join(client.run(gather_logs).values())
else:
all_logs = gather_logs()
parsed_logs = [json.loads(log) for log in all_logs.splitlines() if log]
# Some other log records can end up in here. Filter those out.
parsed_logs = [log for log in parsed_logs if log["event"] == "Execute IR"]
# Now we want to augment the existing Records with the trace data.
def group_key(x: dict) -> int:
return x["query_id"]
def sort_key(x: dict) -> tuple[int, int]:
return x["query_id"], x["iteration"]
grouped = itertools.groupby(
sorted(parsed_logs, key=sort_key),
key=group_key,
)
for query_id, run_logs_group in grouped:
run_logs = list(run_logs_group)
by_iteration = [
list(x)
for _, x in itertools.groupby(run_logs, key=lambda x: x["iteration"])
]
run_records = run_config.records[query_id]
assert len(by_iteration) == len(run_records) # same number of iterations
all_traces = [list(iteration) for iteration in by_iteration]
new_records = [
dataclasses.replace(record, traces=traces)
for record, traces in zip(run_records, all_traces, strict=True)
]
run_config.records[query_id] = new_records
if args.summarize:
run_config.summarize()
if client is not None:
client.close(timeout=60)
if args.validate and run_config.executor != "cpu":
print("\nValidation Summary")
print("==================")
if validation_failures:
print(
f"{len(validation_failures)} queries failed validation: {sorted(set(validation_failures))}"
)
else:
print("All validated queries passed.")
args.output.write(json.dumps(run_config.serialize(engine=engine)))
args.output.write("\n")
if query_failures or validation_failures:
sys.exit(1)
def setup_logging(query_id: int, iteration: int) -> None:
    """Configure stdlib/structlog logging so trace records are tagged with this run.

    Called once per (query, iteration) pair — and, for distributed runs, on every
    Dask worker via ``client.run`` — so that each emitted log record carries the
    ``query_id`` and ``iteration`` it belongs to. The records are later read back
    from the handler's in-memory stream by ``gather_logs`` and joined to the
    benchmark ``Record`` objects.

    Raises:
        RuntimeError: if trace collection was requested but tracing is not
            enabled in cudf_polars (``CUDF_POLARS_LOG_TRACES`` unset).
    """
    import cudf_polars.dsl.tracing
    if not cudf_polars.dsl.tracing.LOG_TRACES:
        msg = (
            "Tracing requested via --collect-traces, but tracking is not enabled. "
            "Verify that 'CUDF_POLARS_LOG_TRACES' is set and structlog is installed."
        )
        raise RuntimeError(msg)
    if _HAS_STRUCTLOG:
        # structlog uses contextvars to propagate context down to where log records
        # are emitted. Ideally, we'd just set the contextvars here using
        # structlog.bind_contextvars; for the distributed cluster we would need
        # to use something like client.run to set the contextvars on the worker.
        # However, there's an unfortunate conflict between structlog's use of
        # context vars and how Dask Workers actually execute tasks, such that
        # the contextvars set via `client.run` aren't visible to the actual
        # tasks.
        #
        # So instead we make a new logger each time we need a new context,
        # i.e. for each query/iteration pair.
        def make_injector(
            query_id: int, iteration: int
        ) -> Callable[[logging.Logger, str, dict[str, Any]], dict[str, Any]]:
            # Returns a structlog processor that stamps every event dict with
            # the current query/iteration, binding the values at creation time.
            def inject(
                logger: Any, method_name: Any, event_dict: Any
            ) -> dict[str, Any]:
                event_dict["query_id"] = query_id
                event_dict["iteration"] = iteration
                return event_dict
            return inject
        shared_processors = [
            structlog.contextvars.merge_contextvars,
            make_injector(query_id, iteration),
            structlog.processors.add_log_level,
            structlog.processors.CallsiteParameterAdder(
                parameters=[
                    structlog.processors.CallsiteParameter.PROCESS,
                    structlog.processors.CallsiteParameter.THREAD,
                ],
            ),
            structlog.processors.StackInfoRenderer(),
            structlog.dev.set_exc_info,
            structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f", utc=False),
        ]
        # For logging to a file
        json_renderer = structlog.processors.JSONRenderer()
        stream = io.StringIO()
        json_file_handler = logging.StreamHandler(stream)
        json_file_handler.setFormatter(
            structlog.stdlib.ProcessorFormatter(
                processor=json_renderer,
                foreign_pre_chain=shared_processors,
            )
        )
        # NOTE(review): logging.basicConfig is a no-op once the root logger
        # already has handlers, so on repeat calls records keep flowing into the
        # first call's StringIO rather than the fresh one created above.
        # Per-run tagging still appears correct because structlog.configure
        # below installs the fresh injector for structlog-originated events —
        # confirm this accumulation into one stream is intended.
        logging.basicConfig(level=logging.INFO, handlers=[json_file_handler])
        structlog.configure(
            processors=[
                *shared_processors,
                structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
            ],
            logger_factory=structlog.stdlib.LoggerFactory(),
            wrapper_class=structlog.make_filtering_bound_logger(logging.INFO),
            cache_logger_on_first_use=True,
        )
# Tables registered as DuckDB views (one Parquet glob per name) when running
# the "pdsds" query set.
PDSDS_TABLE_NAMES: list[str] = [
    "call_center",
    "catalog_page",
    "catalog_returns",
    "catalog_sales",
    "customer",
    "customer_address",
    "customer_demographics",
    "date_dim",
    "household_demographics",
    "income_band",
    "inventory",
    "item",
    "promotion",
    "reason",
    "ship_mode",
    "store",
    "store_returns",
    "store_sales",
    "time_dim",
    "warehouse",
    "web_page",
    "web_returns",
    "web_sales",
    "web_site",
]
# Tables registered as DuckDB views for every other query set (the "pdsh" set).
PDSH_TABLE_NAMES: list[str] = [
    "customer",
    "lineitem",
    "nation",
    "orders",
    "part",
    "partsupp",
    "region",
    "supplier",
]
def print_duckdb_plan(
    q_id: int,
    sql: str,
    dataset_path: Path,
    suffix: str,
    query_set: str,
    args: argparse.Namespace,
) -> None:
    """Print DuckDB query plan using EXPLAIN.

    Registers one view per benchmark table over the Parquet files under
    ``dataset_path``, selects the requested plan level via the
    ``explain_output`` pragma, and prints the plan line by line.
    """
    if duckdb is None:
        raise ImportError(duckdb_err)
    table_names = PDSDS_TABLE_NAMES if query_set == "pdsds" else PDSH_TABLE_NAMES
    # Decide which plan level to show before touching the connection.
    if args.explain_logical and args.explain:
        explain_output = "all"
    elif args.explain_logical:
        explain_output = "optimized_only"
    else:
        explain_output = "physical_only"
    base = Path(dataset_path)
    with duckdb.connect() as conn:
        for table in table_names:
            glob = (base / table).as_posix() + suffix
            conn.execute(
                f"CREATE OR REPLACE VIEW {table} AS "
                f"SELECT * FROM parquet_scan('{glob}');"
            )
        conn.execute(f"PRAGMA explain_output = '{explain_output}';")
        print(f"\nDuckDB Query {q_id} - Plan\n")
        for _, plan_line in conn.execute(f"EXPLAIN {sql}").fetchall():
            print(plan_line)
def execute_duckdb_query(
    query: str,
    dataset_path: Path,
    *,
    suffix: str = ".parquet",
    query_set: str = "pdsh",
) -> pl.DataFrame:
    """Execute a query with DuckDB.

    Each benchmark table is exposed as a view over ``dataset_path/<name><suffix>``
    before the query runs; the result is returned as a Polars DataFrame.
    """
    if duckdb is None:
        raise ImportError(duckdb_err)
    table_names = PDSDS_TABLE_NAMES if query_set == "pdsds" else PDSH_TABLE_NAMES
    root = Path(dataset_path)
    with duckdb.connect() as conn:
        for table in table_names:
            glob = (root / table).as_posix() + suffix
            conn.execute(
                f"CREATE OR REPLACE VIEW {table} AS "
                f"SELECT * FROM parquet_scan('{glob}');"
            )
        return conn.execute(query).pl()
def run_duckdb(
    duckdb_queries_cls: Any, options: Sequence[str] | None = None, *, num_queries: int
) -> None:
    """Run the benchmark with DuckDB.

    Args:
        duckdb_queries_cls: Class providing ``q<N>(run_config)`` methods that
            return SQL text, plus a ``name`` attribute naming the query set.
        options: Optional CLI arguments forwarded to ``parse_args``.
        num_queries: Total number of queries available in the query set.

    Raises:
        NotImplementedError: If a requested query id has no ``q<N>`` method.
    """
    args = parse_args(options, num_queries=num_queries)
    vars(args).update({"query_set": duckdb_queries_cls.name})
    run_config = RunConfig.from_args(args)
    records: defaultdict[int, list[Record]] = defaultdict(list)
    for q_id in run_config.queries:
        try:
            get_q = getattr(duckdb_queries_cls, f"q{q_id}")
        except AttributeError as err:
            raise NotImplementedError(f"Query {q_id} not implemented.") from err
        sql = get_q(run_config)
        if args.explain or args.explain_logical:
            print_duckdb_plan(
                q_id=q_id,
                sql=sql,
                dataset_path=run_config.dataset_path,
                suffix=run_config.suffix,
                query_set=duckdb_queries_cls.name,
                args=args,
            )
        print(f"DuckDB Executing: {q_id}")
        records[q_id] = []
        for i in range(args.iterations):
            # Use a monotonic clock for durations, matching the Polars runner;
            # time.time() is subject to wall-clock adjustments (e.g. NTP).
            t0 = time.monotonic()
            result = execute_duckdb_query(
                sql,
                run_config.dataset_path,
                suffix=run_config.suffix,
                query_set=duckdb_queries_cls.name,
            )
            t1 = time.monotonic()
            record = Record(query=q_id, iteration=i, duration=t1 - t0)
            if args.print_results:
                print(result)
            print(f"Query {q_id} - Iteration {i} finished in {record.duration:0.4f}s")
            records[q_id].append(record)
    run_config = dataclasses.replace(run_config, records=dict(records))
    if args.summarize:
        run_config.summarize()
def run_validate(
    polars_queries_cls: Any,
    duckdb_queries_cls: Any,
    options: Sequence[str] | None = None,
    *,
    num_queries: int,
    check_dtypes: bool,
    check_column_order: bool,
) -> None:
    """Validate Polars CPU/GPU vs DuckDB.

    For each requested query, computes a baseline result (DuckDB or Polars
    CPU streaming, per ``--baseline``) and compares it against the result
    produced by the configured executor, printing a per-query pass/fail line
    and a final summary of failures.

    Args:
        polars_queries_cls: Class providing Polars ``q<N>(run_config)`` methods.
        duckdb_queries_cls: Class providing DuckDB ``q<N>(run_config)`` methods.
        options: Optional CLI arguments forwarded to ``parse_args``.
        num_queries: Total number of queries available in the query set.
        check_dtypes: Forwarded to ``assert_frame_equal``.
        check_column_order: Forwarded to ``assert_frame_equal``.

    Raises:
        ValueError: If ``--baseline`` is not one of ``duckdb`` or ``cpu``.
        NotImplementedError: If a requested query id is missing in either class.
    """
    from polars.testing import assert_frame_equal

    args = parse_args(options, num_queries=num_queries)
    vars(args).update({"query_set": polars_queries_cls.name})
    run_config = RunConfig.from_args(args)
    baseline = args.baseline
    if baseline not in {"duckdb", "cpu"}:
        raise ValueError("Baseline must be one of: 'duckdb', 'cpu'")
    failures: list[int] = []
    engine: pl.GPUEngine | None = None
    if run_config.executor != "cpu":
        engine = pl.GPUEngine(
            raise_on_fail=True,
            executor=run_config.executor,
            executor_options=get_executor_options(run_config, polars_queries_cls),
        )
    for q_id in run_config.queries:
        print(f"\nValidating Query {q_id}")
        try:
            get_pl = getattr(polars_queries_cls, f"q{q_id}")
            get_ddb = getattr(duckdb_queries_cls, f"q{q_id}")
        except AttributeError as err:
            raise NotImplementedError(f"Query {q_id} not implemented.") from err
        polars_query = get_pl(run_config)
        if baseline == "duckdb":
            base_sql = get_ddb(run_config)
            base_result = execute_duckdb_query(
                base_sql,
                run_config.dataset_path,
                # Pass the configured suffix, as every other call site does,
                # so non-default dataset layouts resolve correctly.
                suffix=run_config.suffix,
                query_set=duckdb_queries_cls.name,
            )
        else:
            base_result = polars_query.collect(engine="streaming")
        if run_config.executor == "cpu":
            test_result = polars_query.collect(engine="streaming")
        else:
            try:
                test_result = polars_query.collect(engine=engine)
            except Exception as e:
                # GPU execution errors count as validation failures rather
                # than aborting the whole run.
                failures.append(q_id)
                print(f"❌ Query {q_id} failed validation: GPU execution failed.\n{e}")
                continue
        try:
            assert_frame_equal(
                base_result,
                test_result,
                check_dtypes=check_dtypes,
                check_column_order=check_column_order,
            )
            print(f"✅ Query {q_id} passed validation.")
        except AssertionError as e:
            failures.append(q_id)
            print(f"❌ Query {q_id} failed validation:\n{e}")
            if args.print_results:
                print("Baseline Result:\n", base_result)
                print("Test Result:\n", test_result)
    if failures:
        print("\nValidation Summary:")
        print("===================")
        print(f"{len(failures)} query(s) failed: {failures}")
    else:
        print("\nAll queries passed validation.")
| RunConfig |
python | huggingface__transformers | src/transformers/models/regnet/modeling_regnet.py | {
"start": 2959,
"end": 3613
} | class ____(nn.Module):
"""
RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
return hidden_state
| RegNetShortCut |
python | scikit-learn__scikit-learn | sklearn/manifold/_isomap.py | {
"start": 828,
"end": 15734
} | class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Isomap Embedding.
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : int or None, default=5
Number of neighbors to consider for each point. If `n_neighbors` is an int,
then `radius` must be `None`.
radius : float or None, default=None
Limiting distance of neighbors to return. If `radius` is a float,
then `n_neighbors` must be set to `None`.
.. versionadded:: 1.1
n_components : int, default=2
Number of coordinates for the manifold.
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float, default=0
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : int, default=None
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : {'auto', 'FW', 'D'}, default='auto'
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
default='auto'
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int or None, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
metric : str, or callable, default="minkowski"
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a :term:`Glossary <sparse graph>`.
.. versionadded:: 0.22
p : float, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
.. versionadded:: 0.22
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.22
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
:class:`~sklearn.decomposition.KernelPCA` object used to implement the
embedding.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.decomposition.PCA : Principal component analysis that is a linear
dimensionality reduction method.
sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
kernels and PCA.
MDS : Manifold learning using multidimensional scaling.
TSNE : T-distributed Stochastic Neighbor Embedding.
LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
SpectralEmbedding : Spectral embedding for non-linear dimensionality.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import Isomap
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = Isomap(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
_parameter_constraints: dict = {
"n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
"radius": [Interval(Real, 0, None, closed="both"), None],
"n_components": [Interval(Integral, 1, None, closed="left")],
"eigen_solver": [StrOptions({"auto", "arpack", "dense"})],
"tol": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left"), None],
"path_method": [StrOptions({"auto", "FW", "D"})],
"neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
"n_jobs": [Integral, None],
"p": [Interval(Real, 1, None, closed="left")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"metric_params": [dict, None],
}
def __init__(
self,
*,
n_neighbors=5,
radius=None,
n_components=2,
eigen_solver="auto",
tol=0,
max_iter=None,
path_method="auto",
neighbors_algorithm="auto",
n_jobs=None,
metric="minkowski",
p=2,
metric_params=None,
):
self.n_neighbors = n_neighbors
self.radius = radius
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
self.metric = metric
self.p = p
self.metric_params = metric_params
def _fit_transform(self, X):
if self.n_neighbors is not None and self.radius is not None:
raise ValueError(
"Both n_neighbors and radius are provided. Use"
f" Isomap(radius={self.radius}, n_neighbors=None) if intended to use"
" radius-based neighbors"
)
self.nbrs_ = NearestNeighbors(
n_neighbors=self.n_neighbors,
radius=self.radius,
algorithm=self.neighbors_algorithm,
metric=self.metric,
p=self.p,
metric_params=self.metric_params,
n_jobs=self.n_jobs,
)
self.nbrs_.fit(X)
self.n_features_in_ = self.nbrs_.n_features_in_
if hasattr(self.nbrs_, "feature_names_in_"):
self.feature_names_in_ = self.nbrs_.feature_names_in_
self.kernel_pca_ = KernelPCA(
n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol,
max_iter=self.max_iter,
n_jobs=self.n_jobs,
).set_output(transform="default")
if self.n_neighbors is not None:
nbg = kneighbors_graph(
self.nbrs_,
self.n_neighbors,
metric=self.metric,
p=self.p,
metric_params=self.metric_params,
mode="distance",
n_jobs=self.n_jobs,
)
else:
nbg = radius_neighbors_graph(
self.nbrs_,
radius=self.radius,
metric=self.metric,
p=self.p,
metric_params=self.metric_params,
mode="distance",
n_jobs=self.n_jobs,
)
# Compute the number of connected components, and connect the different
# components to be able to compute a shortest path between all pairs
# of samples in the graph.
# Similar fix to cluster._agglomerative._fix_connectivity.
n_connected_components, labels = connected_components(nbg)
if n_connected_components > 1:
if self.metric == "precomputed" and issparse(X):
raise RuntimeError(
"The number of connected components of the neighbors graph"
f" is {n_connected_components} > 1. The graph cannot be "
"completed with metric='precomputed', and Isomap cannot be"
"fitted. Increase the number of neighbors to avoid this "
"issue, or precompute the full distance matrix instead "
"of passing a sparse neighbors graph."
)
warnings.warn(
(
"The number of connected components of the neighbors graph "
f"is {n_connected_components} > 1. Completing the graph to fit"
" Isomap might be slow. Increase the number of neighbors to "
"avoid this issue."
),
stacklevel=2,
)
# use array validated by NearestNeighbors
nbg = _fix_connected_components(
X=self.nbrs_._fit_X,
graph=nbg,
n_connected_components=n_connected_components,
component_labels=labels,
mode="distance",
metric=self.nbrs_.effective_metric_,
**self.nbrs_.effective_metric_params_,
)
self.dist_matrix_ = shortest_path(nbg, method=self.path_method, directed=False)
if self.nbrs_._fit_X.dtype == np.float32:
self.dist_matrix_ = self.dist_matrix_.astype(
self.nbrs_._fit_X.dtype, copy=False
)
G = self.dist_matrix_**2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
self._n_features_out = self.embedding_.shape[1]
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Reconstruction error.
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_**2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.eigenvalues_
return np.sqrt(np.sum(G_center**2) - np.sum(evals**2)) / G.shape[0]
@_fit_context(
# Isomap.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse matrix, precomputed tree, or NearestNeighbors
object.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns a fitted instance of self.
"""
self._fit_transform(X)
return self
@_fit_context(
# Isomap.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
X transformed in the new space.
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
"""
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
else:
distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
# Create the graph of shortest distances from X to
# training data via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
n_samples_fit = self.nbrs_.n_samples_fit_
n_queries = distances.shape[0]
if hasattr(X, "dtype") and X.dtype == np.float32:
dtype = np.float32
else:
dtype = np.float64
G_X = np.zeros((n_queries, n_samples_fit), dtype)
for i in range(n_queries):
G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
tags.input_tags.sparse = True
return tags
| Isomap |
python | openai__openai-python | src/openai/types/responses/response_retrieve_params.py | {
"start": 1771,
"end": 2372
} | class ____(ResponseRetrieveParamsBase):
stream: Required[Literal[True]]
"""
If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
"""
ResponseRetrieveParams = Union[ResponseRetrieveParamsNonStreaming, ResponseRetrieveParamsStreaming]
| ResponseRetrieveParamsStreaming |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 440,
"end": 2326
} | class ____(Operation):
def call(self, f, xs):
return backend.core.map(f, xs)
def compute_output_spec(self, f, xs):
x = tree.map_structure(lambda t: t[0], xs)
n = tree.flatten(xs)[0].shape[0]
y = backend.compute_output_spec(f, x)
def append_batch_axis(t):
return KerasTensor(
shape=(n,) + t.shape,
dtype=t.dtype,
sparse=t.sparse,
ragged=t.ragged,
)
y = tree.map_structure(append_batch_axis, y)
return y
@keras_export("keras.ops.map")
def map(f, xs):
"""Map a function over leading array axes.
Like Python’s builtin map, except inputs and outputs are in the form of
stacked arrays. Consider using the `vectorized_map()` transform instead,
unless you need to apply a function element by element for reduced memory
usage or heterogeneous computation with other control flow primitives.
When `xs` is an array type, the semantics of `map()` are given by this
Python implementation:
```python
def map(f, xs):
return np.stack([f(x) for x in xs])
```
Args:
f: Callable defines the function to apply element-wise over the first
axis or axes of `xs`.
xs: Values over which to map along the leading axis.
Returns:
Mapped values.
Examples:
>>> f = lambda x: x**2
>>> xs = keras.ops.arange(10)
>>> ys = keras.ops.map(f, xs)
>>> ys
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> f = lambda x: {"y1": x**2, "y2": x * 10} # Can have nested outputs
>>> ys = keras.ops.map(f, xs)
>>> ys["y1"]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> ys["y2"]
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
"""
if any_symbolic_tensors((xs,)):
return Map().symbolic_call(f, xs)
return backend.core.map(f, xs)
| Map |
python | etianen__django-reversion | reversion/views.py | {
"start": 1235,
"end": 1981
} | class ____:
"""
A class-based view mixin that wraps the request in a revision.
The revision will have it's user set from the request automatically.
"""
revision_manage_manually = False
revision_using = None
revision_atomic = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dispatch = create_revision(
manage_manually=self.revision_manage_manually,
using=self.revision_using,
atomic=self.revision_atomic,
request_creates_revision=self.revision_request_creates_revision
)(self.dispatch)
def revision_request_creates_revision(self, request):
return _request_creates_revision(request)
| RevisionMixin |
python | catalyst-team__catalyst | catalyst/callbacks/metrics/scikit_learn.py | {
"start": 392,
"end": 5005
} | class ____(FunctionalBatchMetricCallback):
"""SklearnBatchCallback implements an integration of **batch-based** Sklearn metrics
Args:
keys: a dictionary containing:
a mapping between ``metric_fn`` arguments and keys in ``runner.batch``
other arguments needed for ``metric_fn``
metric_fn: metric function that gets outputs, targets, and other arguments given
in ``keys`` and returns score
metric_key: key to store computed metric in ``runner.batch_metrics`` dictionary
log_on_batch: boolean flag to log computed metrics every batch
metric_kwargs: additional parameters for ``metric_fn``
.. note::
catalyst[ml] required for this callback
Examples:
.. code-block:: python
import sklearn
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
from functools import partial
# sample data
num_samples, num_features, num_classes = int(1e4), int(1e1), 4
X = torch.rand(num_samples, num_features)
y = (torch.rand(num_samples, num_classes) > 0.5).to(torch.float32)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_classes)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
num_epochs=3,
verbose=True,
callbacks=[
dl.BatchTransformCallback(
input_key="targets",
output_key="labels",
transform=partial(torch.argmax, dim=1),
scope="on_batch_end",
),
dl.BatchTransformCallback(
input_key="logits",
output_key="scores",
transform=partial(torch.softmax, dim=1),
scope="on_batch_end",
),
dl.BatchTransformCallback(
input_key="scores",
output_key="preds",
transform=partial(torch.argmax, dim=1),
scope="on_batch_end",
),
dl.MultilabelAccuracyCallback(
input_key="logits", target_key="targets", threshold=0.5
),
dl.SklearnBatchCallback(
keys={"y_pred": "preds", "y_true": "labels"},
metric_fn="f1_score",
metric_key="sk_f1",
average="macro",
zero_division=1,
)
]
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
keys: Mapping[str, Any],
metric_fn: Union[Callable, str],
metric_key: str,
log_on_batch: bool = True,
**metric_kwargs
):
"""Init."""
if isinstance(metric_fn, str):
metric_fn = sklearn.metrics.__dict__[metric_fn]
metric_fn = partial(metric_fn, **metric_kwargs)
super().__init__(
metric=FunctionalBatchMetric(metric_fn=metric_fn, metric_key=metric_key),
input_key=keys,
target_key=keys,
log_on_batch=log_on_batch,
)
def _get_key_value_inputs(self, runner: "IRunner") -> Dict[str, torch.Tensor]:
"""Get data from batch in a format suitable for the Sklearn metrics calculation
Args:
runner: current runner
Returns:
dict of inputs and targets tensors
"""
kv_inputs = {}
for key, value in self._keys.items():
kv_inputs[key] = runner.batch[value].cpu().detach()
kv_inputs["batch_size"] = runner.batch_size
return kv_inputs
| SklearnBatchCallback |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI045.py | {
"start": 292,
"end": 433
} | class ____:
def __iter__(self) -> typing.Iterable:
...
def not_iter(self) -> typing.Iterable:
...
| TypingIterableReturn |
python | getsentry__sentry | tests/sentry/backup/test_imports.py | {
"start": 29532,
"end": 33038
} | class ____(ImportTestCase):
"""
Some models are automatically created via signals and similar automagic from related models. We
test that behavior here. Specifically, we test the following:
- That `Email` and `UserEmail` are automatically created when `User` is.
- That `OrganizationMapping` and `OrganizationMemberMapping` are automatically created when
`Organization is.
- That `ProjectKey` and `ProjectOption` instances are automatically created when `Project`
is.
"""
def test_import_signaling_user(self) -> None:
self.create_exhaustive_user("user", email="me@example.com")
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
with open(tmp_path, "rb") as tmp_file:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.count() == 1
assert User.objects.filter(email="me@example.com").exists()
assert UserEmail.objects.count() == 1
assert UserEmail.objects.filter(email="me@example.com").exists()
assert Email.objects.count() == 1
assert Email.objects.filter(email="me@example.com").exists()
def test_import_signaling_organization(self) -> None:
owner = self.create_exhaustive_user("owner")
invited = self.create_exhaustive_user("invited")
member = self.create_exhaustive_user("member")
self.create_exhaustive_organization("some-org", owner, invited, [member])
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
with open(tmp_path, "rb") as tmp_file:
import_in_organization_scope(tmp_file, printer=NOOP_PRINTER)
# There should only be 1 organization at this point
imported_organization = Organization.objects.get()
assert imported_organization.slug == "some-org"
assert OrganizationMember.objects.count() == 3
# The exhaustive org has 1 project which automatically gets 1 key and 3 options.
assert Project.objects.count() == 1
assert Project.objects.filter(name="project-some-org").exists()
assert ProjectKey.objects.count() == 1
assert ProjectOption.objects.count() == 1
assert ProjectOption.objects.filter(key="sentry:option-epoch").exists()
with assume_test_silo_mode(SiloMode.CONTROL):
# An organization slug reservation with a valid primary reservation type
# signals that we've synchronously resolved the slug update RPC correctly.
assert OrganizationSlugReservation.objects.filter(
organization_id=imported_organization.id,
slug="some-org",
reservation_type=OrganizationSlugReservationType.PRIMARY,
).exists()
assert OrganizationMapping.objects.count() == 1
assert OrganizationMapping.objects.filter(
organization_id=imported_organization.id, slug="some-org"
).exists()
assert OrganizationMemberMapping.objects.count() == 3
def test_import_signaling_organization_with_control_provisioning_option(self) -> None:
with override_options({"hybrid_cloud.control-organization-provisioning": True}):
self.test_import_signaling_organization()
| SignalingTests |
python | django-import-export__django-import-export | tests/core/migrations/0016_alter_category_options_alter_uuidcategory_options.py | {
"start": 84,
"end": 511
} | class ____(migrations.Migration):
dependencies = [
("core", "0015_withpositiveintegerfields"),
]
operations = [
migrations.AlterModelOptions(
name="category",
options={"verbose_name_plural": "categories"},
),
migrations.AlterModelOptions(
name="uuidcategory",
options={"verbose_name_plural": "UUID categories"},
),
]
| Migration |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 20027,
"end": 21106
} | class ____(Module):
r"""Applies the gated linear unit function.
:math:`{GLU}(a, b)= a \otimes \sigma(b)` where :math:`a` is the first half
of the input matrices and :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
.. image:: ../scripts/activation_images/GLU.png
Examples::
>>> m = nn.GLU()
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: int
def __init__(self, dim: int = -1) -> None:
super().__init__()
self.dim = dim
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.glu(input, self.dim)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"dim={self.dim}"
| GLU |
python | pytorch__pytorch | torch/_inductor/codegen/pallas.py | {
"start": 1879,
"end": 10256
} | class ____(OpOverrides):
"""
Map element-wise ops to JAX/Pallas operations.
For now, we use the default Python operators which are compatible
with JAX numpy broadcasting semantics.
"""
@staticmethod
def sin(x: str) -> str:
return f"jnp.sin({x})"
@staticmethod
def cos(x: str) -> str:
return f"jnp.cos({x})"
@staticmethod
def tan(x: str) -> str:
return f"jnp.tan({x})"
@staticmethod
def sinh(x: str) -> str:
return f"jnp.sinh({x})"
@staticmethod
def cosh(x: str) -> str:
return f"jnp.cosh({x})"
@staticmethod
def tanh(x: str) -> str:
return f"jnp.tanh({x})"
@staticmethod
def asin(x: str) -> str:
return f"jnp.arcsin({x})"
@staticmethod
def acos(x: str) -> str:
return f"jnp.arccos({x})"
@staticmethod
def atan(x: str) -> str:
return f"jnp.arctan({x})"
@staticmethod
def exp(x: str) -> str:
return f"jnp.exp({x})"
@staticmethod
def exp2(x: str) -> str:
return f"jnp.exp2({x})"
@staticmethod
def expm1(x: str) -> str:
return f"jnp.expm1({x})"
@staticmethod
def log(x: str) -> str:
return f"jnp.log({x})"
@staticmethod
def log10(x: str) -> str:
return f"jnp.log10({x})"
@staticmethod
def log2(x: str) -> str:
return f"jnp.log2({x})"
@staticmethod
def log1p(x: str) -> str:
return f"jnp.log1p({x})"
@staticmethod
def sqrt(x: str) -> str:
return f"jnp.sqrt({x})"
@staticmethod
def rsqrt(x: str) -> str:
return f"(1.0 / jnp.sqrt({x}))"
@staticmethod
def abs(x: str) -> str:
return f"jnp.abs({x})"
@staticmethod
def neg(x: str) -> str:
return f"(-{x})"
@staticmethod
def floor(x: str) -> str:
return f"jnp.floor({x})"
@staticmethod
def ceil(x: str) -> str:
return f"jnp.ceil({x})"
@staticmethod
def trunc(x: str) -> str:
return f"jnp.trunc({x})"
@staticmethod
def round(x: str) -> str:
return f"jnp.round({x})"
@staticmethod
def sigmoid(x: str) -> str:
return f"(1.0 / (1.0 + jnp.exp(-{x})))"
@staticmethod
def relu(x: str) -> str:
return f"jnp.maximum({x}, 0)"
@staticmethod
def pow(a: str, b: str) -> str:
return f"jnp.power({a}, {b})"
@staticmethod
def maximum(a: str, b: str) -> str:
return f"jnp.maximum({a}, {b})"
@staticmethod
def minimum(a: str, b: str) -> str:
return f"jnp.minimum({a}, {b})"
@staticmethod
def where(cond: str, a: str, b: str) -> str:
return f"jnp.where({cond}, {a}, {b})"
@staticmethod
def to_dtype(
x: str,
dtype: torch.dtype,
src_dtype: Optional[torch.dtype] = None,
use_compute_types: bool = True,
) -> str:
jax_dtype = torch_dtype_to_jax(dtype)
# Wrap in jnp.asarray to handle scalars from integer indexing
return f"jnp.asarray({x}).astype({jax_dtype})"
@staticmethod
def index_expr(expr: sympy.Expr, dtype: torch.dtype) -> str:
"""Convert a sympy expression to a JAX array indexing expression."""
from ..utils import get_bounds_index_expr
idx_str = V.kernel.kexpr(V.kernel.prepare_indexing(expr))
var = V.kernel.cse.generate(
V.kernel.compute, idx_str, bounds=get_bounds_index_expr(expr)
)
return PallasKernelOverrides.to_dtype(var, dtype)
@staticmethod
def constant(val, dtype: torch.dtype) -> str:
"""Convert a constant value to JAX representation."""
jax_dtype = torch_dtype_to_jax(dtype)
if dtype == torch.bool:
return "True" if val else "False"
return f"jnp.array({val}, dtype={jax_dtype})"
@staticmethod
def real(x: str) -> str:
return f"jnp.real({x})"
@staticmethod
def imag(x: str) -> str:
return f"jnp.imag({x})"
@staticmethod
def conj(x: str) -> str:
return f"jnp.conj({x})"
@staticmethod
def angle(x: str) -> str:
return f"jnp.angle({x})"
@staticmethod
def view_as_real(x: str) -> str:
"""View complex tensor as real tensor with extra dimension."""
return f"jnp.stack([jnp.real({x}), jnp.imag({x})], axis=-1)"
@staticmethod
def view_as_complex(x: str) -> str:
"""View real tensor as complex tensor."""
return f"({x}[..., 0] + 1j * {x}[..., 1])"
# Comparison operations
@staticmethod
def eq(a: str, b: str) -> str:
return f"({a} == {b})"
@staticmethod
def ne(a: str, b: str) -> str:
return f"({a} != {b})"
@staticmethod
def lt(a: str, b: str) -> str:
return f"({a} < {b})"
@staticmethod
def le(a: str, b: str) -> str:
return f"({a} <= {b})"
@staticmethod
def gt(a: str, b: str) -> str:
return f"({a} > {b})"
@staticmethod
def ge(a: str, b: str) -> str:
return f"({a} >= {b})"
# Logical operations
@staticmethod
def logical_and(a: str, b: str) -> str:
return f"jnp.logical_and({a}, {b})"
@staticmethod
def logical_or(a: str, b: str) -> str:
return f"jnp.logical_or({a}, {b})"
@staticmethod
def logical_not(x: str) -> str:
return f"jnp.logical_not({x})"
@staticmethod
def logical_xor(a: str, b: str) -> str:
return f"jnp.logical_xor({a}, {b})"
# Math operations
@staticmethod
def atan2(a: str, b: str) -> str:
return f"jnp.arctan2({a}, {b})"
@staticmethod
def hypot(a: str, b: str) -> str:
return f"jnp.hypot({a}, {b})"
@staticmethod
def fmod(a: str, b: str) -> str:
return f"jnp.fmod({a}, {b})"
@staticmethod
def remainder(a: str, b: str) -> str:
return f"jnp.remainder({a}, {b})"
@staticmethod
def clamp(x: str, min_val: str, max_val: str) -> str:
return f"jnp.clip({x}, {min_val}, {max_val})"
@staticmethod
def clip(x: str, min_val: str, max_val: str) -> str:
return f"jnp.clip({x}, {min_val}, {max_val})"
# Sign operations
@staticmethod
def sign(x: str) -> str:
return f"jnp.sign({x})"
@staticmethod
def signbit(x: str) -> str:
return f"jnp.signbit({x})"
# Special math functions
@staticmethod
def erf(x: str) -> str:
return f"jax.scipy.special.erf({x})"
@staticmethod
def erfc(x: str) -> str:
return f"jax.scipy.special.erfc({x})"
@staticmethod
def erfinv(x: str) -> str:
return f"jax.scipy.special.erfinv({x})"
@staticmethod
def lgamma(x: str) -> str:
return f"jax.scipy.special.gammaln({x})"
@staticmethod
def digamma(x: str) -> str:
return f"jax.scipy.special.digamma({x})"
# Reciprocal and square
@staticmethod
def reciprocal(x: str) -> str:
return f"jnp.reciprocal({x})"
@staticmethod
def square(x: str) -> str:
return f"jnp.square({x})"
# Additional operations
@staticmethod
def fma(a: str, b: str, c: str) -> str:
"""Fused multiply-add: a * b + c"""
return f"jnp.fma({a}, {b}, {c})"
@staticmethod
def copysign(a: str, b: str) -> str:
return f"jnp.copysign({a}, {b})"
@staticmethod
def nextafter(a: str, b: str) -> str:
return f"jnp.nextafter({a}, {b})"
@staticmethod
def ldexp(a: str, b: str) -> str:
return f"jnp.ldexp({a}, {b})"
@staticmethod
def frexp(x: str) -> str:
return f"jnp.frexp({x})"
@staticmethod
def modf(x: str) -> str:
return f"jnp.modf({x})"
# Bitwise operations
@staticmethod
def bitwise_and(a: str, b: str) -> str:
return f"jnp.bitwise_and({a}, {b})"
@staticmethod
def bitwise_or(a: str, b: str) -> str:
return f"jnp.bitwise_or({a}, {b})"
@staticmethod
def bitwise_xor(a: str, b: str) -> str:
return f"jnp.bitwise_xor({a}, {b})"
@staticmethod
def bitwise_not(x: str) -> str:
return f"jnp.bitwise_not({x})"
@staticmethod
def left_shift(a: str, b: str) -> str:
return f"jnp.left_shift({a}, {b})"
@staticmethod
def right_shift(a: str, b: str) -> str:
return f"jnp.right_shift({a}, {b})"
| PallasKernelOverrides |
python | pytorch__pytorch | torch/testing/_internal/jit_metaprogramming_utils.py | {
"start": 789,
"end": 22532
} | class ____(tuple):
__slots__ = ()
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None):
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
if not non_contiguous or tensor.numel() < 2:
return tensor.clone()
return noncontiguous_like(tensor)
def conjugate(tensor):
return tensor.conj()
if isinstance(arg, (torch.Size, dont_convert)):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = conjugate(torch.randn((), dtype=dtype, device=device))
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
# double check casting
elif isinstance(arg, non_differentiable):
if isinstance(arg.tensor, torch.Tensor):
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
elif isinstance(arg, torch.Tensor):
if arg.is_complex() != dtype.is_complex:
raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
"which is not supported for now")
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
return v
elif callable(arg):
return map_arg(arg(dtype=dtype, device=device))
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
# NB: JIT script tests for all nn functional interfaces, script mode does
# not support in_place operations yet, so no inplace operation tests added.
# removed all the deprecated functions
#
# (
# method name,
# input size/constructing fn,
# args (tuple represents shape of a tensor arg),
# test variant name(will be used at test name suffix,
# 'inplace' skips grad tests), // optional
# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
# fn to determine if test should be skipped, // optional
# fn mapping output to part that should be gradcheck'ed, // optional
# kwargs for function, // optional
# )
def get_nn_functional_tests():
nn_functional_tests = [
('conv1d', (S, S, S), ((S, S, S),)),
('conv2d', (S, S, S, S), ((S, S, S, S),)),
('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_transpose1d', (S, S, S), ((S, S, S),)),
('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
('avg_pool1d', (S, S, S), (3,)),
('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
('avg_pool3d', (S, S, S, S, S), (3,)),
('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
('max_pool1d', (S, S, S), (2, 1)),
('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
('max_pool3d', (S, S, S, S, S), (2, 1)),
('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
('lp_pool1d', (S, S, S), (2., 3, 2,)),
('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)),
('adaptive_max_pool1d', (S, S, S), (5,)),
('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')),
('alpha_dropout', (S, S, S), (0.5,)),
('dropout2d', (S, S, S), (0.5,)),
('dropout2d', (S, S, S, S), (0.5,), 'batched'),
('dropout3d', (S, S, S, S), (0.5,)),
('dropout3d', (S, S, S, S, S), (0.5,), 'batched'),
('feature_alpha_dropout', (S, S, S), (0.5,)),
('threshold', (S, S, S), (0.1, 2.), '', (True,)),
('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
('relu', (S, S, S), (), '', (True,)),
('relu', (S, S, S), (), 'inplace'),
('glu', (S - 1, S - 1, S - 1), (),),
('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)),
('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
('relu6', (S, S, S), (), '', (True,)),
('relu6', (S, S, S), (True), 'inplace'),
('elu', (S, S, S), (0.9,),),
('elu', (S, S, S), (0.9, True), 'inplace'),
('selu', (S, S, S), (),),
('selu', (S, S, S), (True), 'inplace'),
('celu', (S, S, S), (0.9,),),
('celu', (S, S, S), (0.9, True), 'inplace'),
('leaky_relu', (S, S, S), (0.02,), '', (True,)),
('leaky_relu', (S, S, S), (0.02,), 'inplace'),
('rrelu', (S, S), (0.1, 0.3, False),),
('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
('hardshrink', (S, S, S), (0.4,), '', (True,)),
('tanhshrink', (S, S, S), (),),
('softsign', (S, S, S), (),),
('softplus', (S, S, S), (), '', (True,)),
('softmin', (S, S, S), (0,),),
('softmax', (S, S, S), (0,), '', (True,)),
('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
('tanh', (S, S, S), (), '', (True,)),
('sigmoid', (S, S, S), (), '', (True,)),
('silu', (S, S, S), (), '', (True,)),
('log_softmax', (S, S, S), (0,), '', (True,)),
('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
('batch_norm', (S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ),
'training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (0, S, S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'size_zero', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (0, S, S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'size_zero_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, non_differentiable(torch.ones(S)), True, ),
'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), None, True, ),
'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, None, False, ),
'inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ),
'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, non_differentiable(torch.ones(S)), False, ),
'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), None, False, ),
'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')),
('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
('layer_norm', (S, S, S, S), ([5],), '',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
non_differentiable(torch.rand(S))), 'with_weight_and_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
('group_norm', (S, S, S), (1, torch.rand(5),),),
('local_response_norm', (S, S, S), (2, ),),
('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('margin_ranking_loss', (S,), ((S,), (S,)),),
('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
('pixel_shuffle', (1, 9, 4, 4), (3,),),
('pixel_unshuffle', (1, 1, 12, 12), (3,),),
('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
('pad', (3, 3, 4, 2), ([1, 1],),),
('pairwise_distance', (S, S), ((S, S),),),
('pdist', (S, S), (),),
('cosine_similarity', (S, S), ((S, S),),),
('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
('normalize', (S, S, S), (),),
('unfold', (S, S, S, S), ([2, 3]),),
('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
1, 1., non_differentiable(torch.randn(S))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
(non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
(torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
torch.randint(1, S, (S,), dtype=torch.long))),
('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
'nearest_4d_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
'nearest_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
'bilinear_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
'bilinear_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
'bicubic_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
'bicubic_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
'nearest_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
'nearest_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
'linear_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
'linear_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
'nearest_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
'nearest_5d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
'trilinear_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
'trilinear_5d_with_size_not_recompute_scale_factor'),
]
return nn_functional_tests
script_template = '''
def the_method({}):
return {}
'''
def value_to_literal(value):
if isinstance(value, str):
# Quotes string and escapes special characters
return ascii(value)
if isinstance(value, torch.Tensor):
return 'torch.' + str(value)
else:
return str(value)
def get_call(method_name, func_type, args, kwargs):
kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()])
self_arg = args[0]
if func_type == 'method':
args = args[1:]
argument_str = ', '.join(args)
argument_str += ', ' if len(args) and len(kwargs) else ''
argument_str += kwargs_str
if func_type == 'functional' or func_type == 'function':
call = f'torch.{method_name}({argument_str})'
elif func_type == 'method':
call = f'{self_arg}.{method_name}({argument_str})'
elif func_type == 'nn_functional':
call = f'torch.nn.functional.{method_name}({argument_str})'
else:
raise TypeError('Unsupported function type')
return call
def get_constant(x):
if x == inf:
return 'math.inf'
if x == -inf:
return '-math.inf'
return x
def get_script_args(args):
formals: list[str] = []
tensors: list[Union[torch.Tensor, list[torch.Tensor]]] = []
actuals: list[str] = []
for arg in args:
if isinstance(arg, torch.Tensor):
name = f'i{len(formals)}'
formals.append(name)
actuals.append(name)
tensors.append(arg)
elif is_iterable_of_tensors(arg):
name = f'i{len(formals)}'
formals.append(name + ': List[torch.Tensor]')
actuals.append(name)
tensors.append(list(arg))
elif isinstance(arg, str):
actuals.append(f"'{arg}'")
else:
actuals.append(str(get_constant(arg)))
return (formals, tensors, actuals)
# create a script function from (name, func_type, output_process_fn),
# and returns the compiled function and example inputs
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
return CU.the_method, tensors
# create a script function from (name, func_type),
# returns a function takes in (args, kwargs) and runs the compiled function
def create_script_fn(self, method_name, func_type):
# function returns tuple containing original output and
# filtered output to be used in checking gradients
def script_fn(*args, **kwargs):
fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
self.assertExportImport(fn.graph, tensors)
output = fn(*tensors)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined]
return output
return script_fn
| dont_convert |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/test_build_tasks.py | {
"start": 1331,
"end": 2541
} | class ____:
# NOTE: `load_yaml_config` maybe be moved to the setup and assign to self.
@pytest.fixture(autouse=True)
def setup(self, requests_mock):
# Save the reference to query it from inside the test
self.requests_mock = requests_mock
self.project = self._get_project()
self.version = self.project.versions.get(slug="latest")
self.build = fixture.get(
Build,
version=self.version,
commit="a1b2c3",
)
self.mocker = BuildEnvironmentMocker(
self.project,
self.version,
self.build,
self.requests_mock,
)
self.mocker.start()
yield
# tearDown
self.mocker.stop()
def _get_project(self):
return fixture.get(
Project,
slug="project",
)
def _trigger_update_docs_task(self):
# NOTE: is it possible to replace calling this directly by `trigger_build` instead? :)
return update_docs_task.delay(
self.version.pk,
self.build.pk,
build_api_key="1234",
build_commit=self.build.commit,
)
| BuildEnvironmentBase |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 24716,
"end": 24860
} | class ____(RootModel[str]):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(frozen=True)
| GroupName |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/named_types.py | {
"start": 5872,
"end": 6102
} | class ____(NamedTypeDropper):
def visit_enum(self, enum):
if not self._can_drop_type(enum):
return
with self.with_ddl_events(enum):
self.connection.execute(DropEnumType(enum))
| EnumDropper |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_pod_certificate_request_spec.py | {
"start": 383,
"end": 19698
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'max_expiration_seconds': 'int',
'node_name': 'str',
'node_uid': 'str',
'pkix_public_key': 'str',
'pod_name': 'str',
'pod_uid': 'str',
'proof_of_possession': 'str',
'service_account_name': 'str',
'service_account_uid': 'str',
'signer_name': 'str'
}
attribute_map = {
'max_expiration_seconds': 'maxExpirationSeconds',
'node_name': 'nodeName',
'node_uid': 'nodeUID',
'pkix_public_key': 'pkixPublicKey',
'pod_name': 'podName',
'pod_uid': 'podUID',
'proof_of_possession': 'proofOfPossession',
'service_account_name': 'serviceAccountName',
'service_account_uid': 'serviceAccountUID',
'signer_name': 'signerName'
}
def __init__(self, max_expiration_seconds=None, node_name=None, node_uid=None, pkix_public_key=None, pod_name=None, pod_uid=None, proof_of_possession=None, service_account_name=None, service_account_uid=None, signer_name=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1PodCertificateRequestSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max_expiration_seconds = None
self._node_name = None
self._node_uid = None
self._pkix_public_key = None
self._pod_name = None
self._pod_uid = None
self._proof_of_possession = None
self._service_account_name = None
self._service_account_uid = None
self._signer_name = None
self.discriminator = None
if max_expiration_seconds is not None:
self.max_expiration_seconds = max_expiration_seconds
self.node_name = node_name
self.node_uid = node_uid
self.pkix_public_key = pkix_public_key
self.pod_name = pod_name
self.pod_uid = pod_uid
self.proof_of_possession = proof_of_possession
self.service_account_name = service_account_name
self.service_account_uid = service_account_uid
self.signer_name = signer_name
@property
def max_expiration_seconds(self):
"""Gets the max_expiration_seconds of this V1alpha1PodCertificateRequestSpec. # noqa: E501
maxExpirationSeconds is the maximum lifetime permitted for the certificate. If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days). The signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours. # noqa: E501
:return: The max_expiration_seconds of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: int
"""
return self._max_expiration_seconds
@max_expiration_seconds.setter
def max_expiration_seconds(self, max_expiration_seconds):
"""Sets the max_expiration_seconds of this V1alpha1PodCertificateRequestSpec.
maxExpirationSeconds is the maximum lifetime permitted for the certificate. If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days). The signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours. # noqa: E501
:param max_expiration_seconds: The max_expiration_seconds of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: int
"""
self._max_expiration_seconds = max_expiration_seconds
@property
def node_name(self):
"""Gets the node_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
nodeName is the name of the node the pod is assigned to. # noqa: E501
:return: The node_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this V1alpha1PodCertificateRequestSpec.
nodeName is the name of the node the pod is assigned to. # noqa: E501
:param node_name: The node_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and node_name is None: # noqa: E501
raise ValueError("Invalid value for `node_name`, must not be `None`") # noqa: E501
self._node_name = node_name
@property
def node_uid(self):
"""Gets the node_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
nodeUID is the UID of the node the pod is assigned to. # noqa: E501
:return: The node_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._node_uid
@node_uid.setter
def node_uid(self, node_uid):
"""Sets the node_uid of this V1alpha1PodCertificateRequestSpec.
nodeUID is the UID of the node the pod is assigned to. # noqa: E501
:param node_uid: The node_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and node_uid is None: # noqa: E501
raise ValueError("Invalid value for `node_uid`, must not be `None`") # noqa: E501
self._node_uid = node_uid
@property
def pkix_public_key(self):
"""Gets the pkix_public_key of this V1alpha1PodCertificateRequestSpec. # noqa: E501
pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to. The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future. Signer implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field. # noqa: E501
:return: The pkix_public_key of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._pkix_public_key
@pkix_public_key.setter
def pkix_public_key(self, pkix_public_key):
"""Sets the pkix_public_key of this V1alpha1PodCertificateRequestSpec.
pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to. The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future. Signer implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field. # noqa: E501
:param pkix_public_key: The pkix_public_key of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pkix_public_key is None: # noqa: E501
raise ValueError("Invalid value for `pkix_public_key`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
pkix_public_key is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', pkix_public_key)): # noqa: E501
raise ValueError(r"Invalid value for `pkix_public_key`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501
self._pkix_public_key = pkix_public_key
@property
def pod_name(self):
"""Gets the pod_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
podName is the name of the pod into which the certificate will be mounted. # noqa: E501
:return: The pod_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._pod_name
@pod_name.setter
def pod_name(self, pod_name):
"""Sets the pod_name of this V1alpha1PodCertificateRequestSpec.
podName is the name of the pod into which the certificate will be mounted. # noqa: E501
:param pod_name: The pod_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pod_name is None: # noqa: E501
raise ValueError("Invalid value for `pod_name`, must not be `None`") # noqa: E501
self._pod_name = pod_name
@property
def pod_uid(self):
"""Gets the pod_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
podUID is the UID of the pod into which the certificate will be mounted. # noqa: E501
:return: The pod_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._pod_uid
@pod_uid.setter
def pod_uid(self, pod_uid):
"""Sets the pod_uid of this V1alpha1PodCertificateRequestSpec.
podUID is the UID of the pod into which the certificate will be mounted. # noqa: E501
:param pod_uid: The pod_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pod_uid is None: # noqa: E501
raise ValueError("Invalid value for `pod_uid`, must not be `None`") # noqa: E501
self._pod_uid = pod_uid
@property
def proof_of_possession(self):
"""Gets the proof_of_possession of this V1alpha1PodCertificateRequestSpec. # noqa: E501
proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey. It is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`. kube-apiserver validates the proof of possession during creation of the PodCertificateRequest. If the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options). If the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1) If the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign). # noqa: E501
:return: The proof_of_possession of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._proof_of_possession
@proof_of_possession.setter
def proof_of_possession(self, proof_of_possession):
"""Sets the proof_of_possession of this V1alpha1PodCertificateRequestSpec.
proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey. It is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`. kube-apiserver validates the proof of possession during creation of the PodCertificateRequest. If the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options). If the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1) If the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign). # noqa: E501
:param proof_of_possession: The proof_of_possession of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and proof_of_possession is None: # noqa: E501
raise ValueError("Invalid value for `proof_of_possession`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
proof_of_possession is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', proof_of_possession)): # noqa: E501
raise ValueError(r"Invalid value for `proof_of_possession`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501
self._proof_of_possession = proof_of_possession
@property
def service_account_name(self):
"""Gets the service_account_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
serviceAccountName is the name of the service account the pod is running as. # noqa: E501
:return: The service_account_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._service_account_name
@service_account_name.setter
def service_account_name(self, service_account_name):
"""Sets the service_account_name of this V1alpha1PodCertificateRequestSpec.
serviceAccountName is the name of the service account the pod is running as. # noqa: E501
:param service_account_name: The service_account_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and service_account_name is None: # noqa: E501
raise ValueError("Invalid value for `service_account_name`, must not be `None`") # noqa: E501
self._service_account_name = service_account_name
@property
def service_account_uid(self):
"""Gets the service_account_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
serviceAccountUID is the UID of the service account the pod is running as. # noqa: E501
:return: The service_account_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._service_account_uid
@service_account_uid.setter
def service_account_uid(self, service_account_uid):
"""Sets the service_account_uid of this V1alpha1PodCertificateRequestSpec.
serviceAccountUID is the UID of the service account the pod is running as. # noqa: E501
:param service_account_uid: The service_account_uid of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and service_account_uid is None: # noqa: E501
raise ValueError("Invalid value for `service_account_uid`, must not be `None`") # noqa: E501
self._service_account_uid = service_account_uid
@property
def signer_name(self):
"""Gets the signer_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
signerName indicates the requested signer. All signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. It is currently unimplemented. # noqa: E501
:return: The signer_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:rtype: str
"""
return self._signer_name
@signer_name.setter
def signer_name(self, signer_name):
"""Sets the signer_name of this V1alpha1PodCertificateRequestSpec.
signerName indicates the requested signer. All signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. It is currently unimplemented. # noqa: E501
:param signer_name: The signer_name of this V1alpha1PodCertificateRequestSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and signer_name is None: # noqa: E501
raise ValueError("Invalid value for `signer_name`, must not be `None`") # noqa: E501
self._signer_name = signer_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1PodCertificateRequestSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1PodCertificateRequestSpec):
return True
return self.to_dict() != other.to_dict()
| V1alpha1PodCertificateRequestSpec |
python | realpython__materials | python-annotations/models.py | {
"start": 86,
"end": 190
} | class ____:
email: str
password: str
def __post_init__(self):
validate_email(self)
| User |
python | getsentry__sentry | tests/sentry/api/serializers/test_grouptagkey.py | {
"start": 137,
"end": 442
} | class ____(TestCase):
def test(self) -> None:
user = self.create_user()
grouptagkey = GroupTagKey(group_id=0, key="key", values_seen=1)
result = serialize(grouptagkey, user)
assert result["key"] == "key"
assert result["uniqueValues"] == 1
| GroupTagKeySerializerTest |
python | huggingface__transformers | src/transformers/models/mistral3/configuration_mistral3.py | {
"start": 725,
"end": 5710
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Mistral3ForConditionalGeneration`]. It is used to instantiate an
Mistral3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
[mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `PixtralVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MistralConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 10):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
multimodal_projector_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the multimodal projector.
spatial_merge_size (`int`, *optional*, defaults to 2):
The downsampling factor for the spatial merge operation.
Example:
```python
>>> from transformers import Mistral3ForConditionalGeneration, Mistral3Config, PixtralVisionConfig, MistralConfig
>>> # Initializing a Pixtral-vision config
>>> vision_config = PixtralVisionConfig()
>>> # Initializing a Mistral config
>>> text_config = MistralConfig()
>>> # Initializing a Mistral3 configuration
>>> configuration = Mistral3Config(vision_config, text_config)
>>> # Initializing a model from the mistral3.1 configuration
>>> model = Mistral3ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mistral3"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
is_composition = True
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=10,
projector_hidden_act="gelu",
vision_feature_layer=-1,
multimodal_projector_bias=False,
spatial_merge_size=2,
**kwargs,
):
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "pixtral")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["pixtral"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=1540,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
head_dim=64,
hidden_act="gelu",
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "mistral")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["mistral"](
attention_dropout=0.0,
head_dim=128,
hidden_act="silu",
hidden_size=5120,
initializer_range=0.02,
intermediate_size=32768,
max_position_embeddings=131072,
model_type="mistral",
num_attention_heads=32,
num_hidden_layers=40,
num_key_value_heads=8,
rms_norm_eps=1e-05,
rope_theta=1000000000.0,
sliding_window=None,
use_cache=True,
vocab_size=131072,
)
self.text_config = text_config
self.multimodal_projector_bias = multimodal_projector_bias
self.spatial_merge_size = spatial_merge_size
super().__init__(**kwargs)
__all__ = ["Mistral3Config"]
| Mistral3Config |
python | astropy__astropy | astropy/modeling/spline.py | {
"start": 24128,
"end": 27532
} | class ____(_SplineFitter):
"""
Fit a spline using the `scipy.interpolate.splrep` function interface.
"""
def __init__(self):
super().__init__()
self.fit_info = {"fp": None, "ier": None, "msg": None}
def __call__(self, model, x, y, **kwargs):
"""
Fit a spline to data using the splrep interface.
Parameters
----------
model : `Spline1D`
The spline model to fit.
x : array-like
The x data values.
y : array-like
The y data values.
task : int, optional
Task parameter for splrep. Default is 0. See
`scipy.interpolate.splrep` for details.
t : array-like, optional
The interior knots needed for ``task=-1``. If given,
then ``task`` is automatically set to -1. see
`scipy.interpolate.splrep` for details. Interior knots for
the spline. If not provided, the model's existing interior
knots (``t_interior``) are used if available.
s : float, optional
Positive smoothing factor used to choose the number of
knots. The user can use ``s`` to control the tradeoff
between closeness and smoothness of fit. Larger ``s`` means
more smoothing while smaller values of ``s`` indicate less
smoothing. If not provided or `None`, ``s`` is calculated
automatically based on the data.
**kwargs : dict, optional
Additional keyword arguments:
- ``weights`` : array-like, optional
Weights for the data points.
- ``bbox`` : array-like, optional
The bounding box limits as ``[xmin, xmax]``. Default is
``[None, None]``.
Returns
-------
fitted_copy : `Spline1D`
A copy of the input model with fitted parameters.
Notes
-----
The fit information (fp, ier, msg) from splrep is stored in
the ``fit_info`` attribute of the fitter instance.
"""
return super().__call__(model, x, y, **kwargs)
def _fit_method(self, model, x, y, **kwargs):
t = kwargs.pop("t", None)
s = kwargs.pop("s", None)
task = kwargs.pop("task", 0)
weights = kwargs.pop("weights", None)
bbox = kwargs.pop("bbox", [None, None])
if t is not None:
if model.user_knots:
warnings.warn(
"The user-specified knots from the input model "
"will be overwritten by knots passed into this "
"function",
AstropyUserWarning,
)
else:
if model.user_knots:
t = model.t_interior
if bbox != [None, None]:
model.bounding_box = bbox
from scipy.interpolate import splrep
tck, fp, ier, msg = splrep(
x,
y,
w=weights,
xb=bbox[0],
xe=bbox[1],
k=model.degree,
s=s,
t=t,
task=task,
full_output=1,
)
model.tck = tck
return fp, ier, msg
def _set_fit_info(self, spline):
self.fit_info["fp"] = spline[0]
self.fit_info["ier"] = spline[1]
self.fit_info["msg"] = spline[2]
| SplineSplrepFitter |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 8127,
"end": 8267
} | class ____(Foo):
def foo(self):
#? int()
super().foo()
# -----------------
# if flow at class level
# -----------------
| Foo |
python | jina-ai__jina | jina/excepts.py | {
"start": 2393,
"end": 2541
} | class ____(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
| NoContainerizedError |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 221804,
"end": 224447
} | class ____:
@pytest.mark.parametrize('dtype', [
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64
])
def test_basic(self, dtype):
a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
assert_array_equal(a[idx], np.sort(a))
def test_mixed(self):
a = np.array([1, 2, 1, 3, 1, 5])
b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
def test_datetime(self):
a = np.array([0, 0, 0], dtype='datetime64[D]')
b = np.array([2, 1, 0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0, 0, 0], dtype='timedelta64[D]')
b = np.array([2, 1, 0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_strings(self): # gh-27984
for dtype in "TU":
surnames = np.array(['Hertz', 'Galilei', 'Hertz'], dtype=dtype)
first_names = np.array(['Heinrich', 'Galileo', 'Gustav'], dtype=dtype)
assert_array_equal(np.lexsort((first_names, surnames)), [1, 2, 0])
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42 * 3).reshape(42, 3)
assert_raises(AxisError, np.lexsort, x, axis=2)
def normalize_filename(tmp_path, param):
# Handles two cases, where filename should
# be a string, or a path object.
path = tmp_path / "file"
if param == "string":
return str(path)
return path
| TestLexsort |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1392125,
"end": 1393565
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'referenced' event on a given `ReferencedSubject`."""
__schema__ = github_schema
__field_names__ = ("actor", "commit", "commit_repository", "created_at", "is_cross_repository", "is_direct_reference", "subject")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
commit = sgqlc.types.Field(Commit, graphql_name="commit")
"""Identifies the commit associated with the 'referenced' event."""
commit_repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="commitRepository")
"""Identifies the repository associated with the 'referenced' event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
is_cross_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCrossRepository")
"""Reference originated in a different repository."""
is_direct_reference = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDirectReference")
"""Checks if the commit message itself references the subject. Can be
false in the case of a commit comment reference.
"""
subject = sgqlc.types.Field(sgqlc.types.non_null("ReferencedSubject"), graphql_name="subject")
"""Object referenced by event."""
| ReferencedEvent |
python | sqlalchemy__sqlalchemy | test/orm/test_froms.py | {
"start": 39132,
"end": 56493
} | class ____(QueryTest, AssertsCompiledSQL):
def test_from_alias_two_needs_nothing(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
q = sess.query(User)
def go():
result = (
q.options(contains_eager(User.addresses))
.from_statement(query)
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_two(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
def go():
ulist = query.alias("ulist")
ulist_alias = aliased(User, alias=ulist)
q = sess.query(ulist_alias)
result = q.options(
contains_eager(ulist_alias.addresses, alias=ulist)
).all()
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_three(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
def go():
qs = query.subquery()
ua = aliased(User, qs)
result = (
sess.query(ua)
.options(contains_eager(ua.addresses, alias=qs))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_four(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
Address = self.classes.Address
sess = fixture_session()
adalias = addresses.alias()
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(adalias)
.select()
.order_by(text("ulist.id"), adalias.c.id)
)
def go():
qs = query.subquery()
ua = aliased(User, qs)
aa = aliased(Address, qs)
result = (
sess.query(ua)
.options(contains_eager(ua.addresses.of_type(aa)))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_one(self):
addresses, User = (self.tables.addresses, self.classes.User)
sess = fixture_session()
# test that contains_eager suppresses the normal outer join rendering
q = (
sess.query(User)
.outerjoin(User.addresses)
.options(contains_eager(User.addresses))
.order_by(User.id, addresses.c.id)
)
self.assert_compile(
q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).statement,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS "
"addresses_email_address, users.id AS "
"users_id, users.name AS users_name FROM "
"users LEFT OUTER JOIN addresses ON "
"users.id = addresses.user_id ORDER BY "
"users.id, addresses.id",
dialect=default.DefaultDialect(),
)
def go():
assert self.static.user_address_result == q.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_two(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session()
adalias = addresses.alias()
qq = users.outerjoin(adalias)
ua = aliased(User, qq)
q = (
sess.query(ua)
.options(contains_eager(ua.addresses, alias=adalias))
.order_by(User.id, adalias.c.id)
)
def go():
eq_(self.static.user_address_result, q.all())
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_four(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session()
selectquery = (
users.outerjoin(addresses)
.select()
.where(users.c.id < 10)
.order_by(users.c.id, addresses.c.id)
)
q = sess.query(User)
def go():
result = (
q.options(contains_eager(User.addresses))
.from_statement(selectquery)
.all()
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_four_future(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session(future=True)
selectquery = (
users.outerjoin(addresses)
.select()
.where(users.c.id < 10)
.order_by(users.c.id, addresses.c.id)
)
q = select(User)
def go():
result = (
sess.execute(
q.options(contains_eager(User.addresses)).from_statement(
selectquery
)
)
.scalars()
.unique()
.all()
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_aliased(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(User)
# Aliased object
adalias = aliased(Address)
def go():
result = (
q.options(contains_eager(User.addresses.of_type(adalias)))
.outerjoin(adalias, User.addresses)
.order_by(User.id, adalias.id)
)
assert self.static.user_address_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_alias(self):
orders, items, users, order_items, User = (
self.tables.orders,
self.tables.items,
self.tables.users,
self.tables.order_items,
self.classes.User,
)
Order = self.classes.Order
sess = fixture_session()
q = sess.query(User)
oalias = orders.alias("o1")
ialias = items.alias("i1")
query = (
users.outerjoin(oalias)
.outerjoin(order_items)
.outerjoin(ialias)
.select()
.order_by(users.c.id, oalias.c.id, ialias.c.id)
)
# test using Alias with more than one level deep
def go():
result = list(
q.options(
contains_eager(User.orders, alias=oalias).contains_eager(
Order.items, alias=ialias
),
).from_statement(query)
)
assert self.static.user_order_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_aliased(self):
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
q = sess.query(User)
# test using Aliased with more than one level deep
oalias = aliased(Order)
ialias = aliased(Item)
def go():
result = (
q.options(
contains_eager(User.orders, alias=oalias),
contains_eager(User.orders, Order.items, alias=ialias),
)
.outerjoin(oalias, User.orders)
.outerjoin(ialias, oalias.items)
.order_by(User.id, oalias.id, ialias.id)
)
assert self.static.user_order_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_aliased_of_type(self):
# test newer style that does not use the alias parameter
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
q = sess.query(User)
# test using Aliased with more than one level deep
oalias = aliased(Order)
ialias = aliased(Item)
def go():
result = (
q.options(
contains_eager(User.orders.of_type(oalias)).contains_eager(
oalias.items.of_type(ialias)
)
)
.outerjoin(User.orders.of_type(oalias))
.outerjoin(oalias.items.of_type(ialias))
.order_by(User.id, oalias.id, ialias.id)
)
assert self.static.user_order_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_chaining(self):
"""test that contains_eager() 'chains' by default."""
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = fixture_session()
q = (
sess.query(User)
.join(User.addresses)
.join(Address.dingaling)
.options(contains_eager(User.addresses, Address.dingaling))
)
def go():
eq_(
q.all(),
# note we only load the Address records that
# have a Dingaling here due to using the inner
# join for the eager load
[
User(
name="ed",
addresses=[
Address(
email_address="ed@wood.com",
dingaling=Dingaling(data="ding 1/2"),
)
],
),
User(
name="fred",
addresses=[
Address(
email_address="fred@fred.com",
dingaling=Dingaling(data="ding 2/5"),
)
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_chaining_aliased_endpoint(self):
"""test that contains_eager() 'chains' by default and supports
an alias at the end."""
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = fixture_session()
da = aliased(Dingaling, name="foob")
q = (
sess.query(User)
.join(User.addresses)
.join(da, Address.dingaling)
.options(
contains_eager(User.addresses, Address.dingaling, alias=da)
)
)
def go():
eq_(
q.all(),
# note we only load the Address records that
# have a Dingaling here due to using the inner
# join for the eager load
[
User(
name="ed",
addresses=[
Address(
email_address="ed@wood.com",
dingaling=Dingaling(data="ding 1/2"),
)
],
),
User(
name="fred",
addresses=[
Address(
email_address="fred@fred.com",
dingaling=Dingaling(data="ding 2/5"),
)
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
def test_mixed_eager_contains_with_limit(self):
Order, User, Address = (
self.classes.Order,
self.classes.User,
self.classes.Address,
)
sess = fixture_session()
q = sess.query(User)
def go():
# outerjoin to User.orders, offset 1/limit 2 so we get user
# 7 + second two orders. then joinedload the addresses.
# User + Order columns go into the subquery, address left
# outer joins to the subquery, joinedloader for User.orders
# applies context.adapter to result rows. This was
# [ticket:1180].
result = (
q.outerjoin(User.orders)
.options(
joinedload(User.addresses), contains_eager(User.orders)
)
.order_by(User.id, Order.id)
.offset(1)
.limit(2)
.all()
)
eq_(
result,
[
User(
id=7,
addresses=[
Address(
email_address="jack@bean.com", user_id=7, id=1
)
],
name="jack",
orders=[
Order(
address_id=1,
user_id=7,
description="order 3",
isopen=1,
id=3,
),
Order(
address_id=None,
user_id=7,
description="order 5",
isopen=0,
id=5,
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
# same as above, except Order is aliased, so two adapters
# are applied by the eager loader
oalias = aliased(Order)
result = (
q.outerjoin(oalias, User.orders)
.options(
joinedload(User.addresses),
contains_eager(User.orders, alias=oalias),
)
.order_by(User.id, oalias.id)
.offset(1)
.limit(2)
.all()
)
eq_(
result,
[
User(
id=7,
addresses=[
Address(
email_address="jack@bean.com", user_id=7, id=1
)
],
name="jack",
orders=[
Order(
address_id=1,
user_id=7,
description="order 3",
isopen=1,
id=3,
),
Order(
address_id=None,
user_id=7,
description="order 5",
isopen=0,
id=5,
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
| InstancesTest |
python | chroma-core__chroma | chromadb/utils/embedding_functions/chroma_bm25_embedding_function.py | {
"start": 1205,
"end": 1381
} | class ____(TypedDict, total=False):
k: float
b: float
avg_doc_length: float
token_max_length: int
stopwords: List[str]
store_tokens: bool
| ChromaBm25Config |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/xcom.py | {
"start": 2292,
"end": 2545
} | class ____(XComResponse):
"""XCom response serializer with string return type."""
value: str | None
@field_validator("value", mode="before")
def value_to_string(cls, v):
return str(v) if v is not None else None
| XComResponseString |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond_left/package.py | {
"start": 217,
"end": 574
} | class ____(Package):
"""This package has an indirect diamond dependency on dt-diamond-bottom"""
homepage = "http://www.example.com"
url = "http://www.example.com/dt-diamond-left-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("dt-diamond-bottom", type="build")
depends_on("c", type="build")
| DtDiamondLeft |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_init.py | {
"start": 16414,
"end": 19126
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 2
@skip_if_lt_x_gpu(1)
def test_shard_tensor_parameters(self):
# Use odd dim sizes to test uneven shards
model = nn.Sequential(*[MLP(3, dim_multiplier=3) for _ in range(3)])
orig_params = [param.detach().clone() for param in model.parameters()]
fully_shard(model)
sharded_params = list(model.parameters())
self._check_1d_sharded_parameters(orig_params, sharded_params)
model = nn.Sequential(*[MLP(3, dim_multiplier=3) for _ in range(3)])
model[0].in_proj = model[1].in_proj
orig_params = [param.detach().clone() for param in model.parameters()]
fully_shard(model)
sharded_params = list(model.parameters())
self._check_1d_sharded_parameters(orig_params, sharded_params)
def _check_1d_sharded_parameters(
self, orig_params: list[nn.Parameter], sharded_params: list[nn.Parameter]
):
self.assertEqual(len(orig_params), len(sharded_params))
global_mesh = init_device_mesh(device_type.type, (self.world_size,))
for orig_param, sharded_param in zip(orig_params, sharded_params):
self.assertIsInstance(sharded_param, DTensor)
self.assertEqual(sharded_param.device_mesh, global_mesh)
self.assertEqual(sharded_param.size(), orig_param.size())
self.assertEqual(sharded_param.stride(), orig_param.stride())
self.assertEqual(sharded_param._spec.placements, (Shard(0),))
chunks = torch.chunk(orig_param, self.world_size, dim=0)
self.assertEqual(sharded_param._local_tensor, chunks[self.rank])
@skip_if_lt_x_gpu(1)
def test_raise_scalar_parameter(self):
"""Tests raising an exception when the model has scalar parameters."""
model = nn.Sequential(*[MLP(3, dim_multiplier=3) for _ in range(3)])
model.register_parameter(
"scalar_p", nn.Parameter(torch.tensor(1.0).to(device_type))
)
with self.assertRaisesRegex(
ValueError, "Change scalar_p to a 1D tensor with numel equal to 1."
):
fully_shard(model)
@skip_if_lt_x_gpu(1)
def test_raise_noncontiguous_parameter(self):
"""
Tests raising an exception when the model has non-contiguous
parameters. This is due to lack of implementation support.
"""
conv2d = nn.Conv2d(8, 8, 3).to(memory_format=torch.channels_last)
with self.assertRaisesRegex(
NotImplementedError, "FSDP does not support non-contiguous parameters"
):
fully_shard(conv2d)
| TestFullyShardShardedParameterTensor |
python | imageio__imageio | imageio/plugins/lytro.py | {
"start": 6744,
"end": 14728
} | class ____(LytroFormat):
"""This is the Lytro Illum LFR format.
The lfr is a image and meta data container format as used by the
Lytro Illum light field camera.
The format will read the specified lfr file.
This format does not support writing.
Parameters for reading
----------------------
meta_only : bool
Whether to only read the metadata.
include_thumbnail : bool
Whether to include an image thumbnail in the metadata.
"""
def _can_read(self, request):
# Check if mode and extensions are supported by the format
if request.extension in (".lfr",):
return True
# -- reader
class Reader(Format.Reader):
def _open(self, meta_only=False, include_thumbnail=True):
self._file = self.request.get_file()
self._data = None
self._chunks = {}
self.metadata = {}
self._content = None
self._meta_only = meta_only
self._include_thumbnail = include_thumbnail
self._find_header()
self._find_chunks()
self._find_meta()
try:
# Get sha1 dict and check if it is in dictionary of data chunks
chunk_dict = self._content["frames"][0]["frame"]
if (
chunk_dict["metadataRef"] in self._chunks
and chunk_dict["imageRef"] in self._chunks
and chunk_dict["privateMetadataRef"] in self._chunks
):
if not self._meta_only:
# Read raw image data byte buffer
data_pos, size = self._chunks[chunk_dict["imageRef"]]
self._file.seek(data_pos, 0)
self.raw_image_data = self._file.read(size)
# Read meta data
data_pos, size = self._chunks[chunk_dict["metadataRef"]]
self._file.seek(data_pos, 0)
metadata = self._file.read(size)
# Add metadata to meta data dict
self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))
# Read private metadata
data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
self._file.seek(data_pos, 0)
serial_numbers = self._file.read(size)
self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
# Add private metadata to meta data dict
self.metadata["privateMetadata"] = self.serial_numbers
# Read image preview thumbnail
if self._include_thumbnail:
chunk_dict = self._content["thumbnails"][0]
if chunk_dict["imageRef"] in self._chunks:
# Read thumbnail image from thumbnail chunk
data_pos, size = self._chunks[chunk_dict["imageRef"]]
self._file.seek(data_pos, 0)
# Read binary data, read image as jpeg
thumbnail_data = self._file.read(size)
thumbnail_img = imread(thumbnail_data, format="jpeg")
thumbnail_height = chunk_dict["height"]
thumbnail_width = chunk_dict["width"]
# Add thumbnail to metadata
self.metadata["thumbnail"] = {
"image": thumbnail_img,
"height": thumbnail_height,
"width": thumbnail_width,
}
except KeyError:
raise RuntimeError("The specified file is not a valid LFR file.")
def _close(self):
# Close the reader.
# Note that the request object will close self._file
del self._data
def _get_length(self):
# Return the number of images. Can be np.inf
return 1
def _find_header(self):
"""
Checks if file has correct header and skip it.
"""
file_header = b"\x89LFP\x0d\x0a\x1a\x0a\x00\x00\x00\x01"
# Read and check header of file
header = self._file.read(HEADER_LENGTH)
if header != file_header:
raise RuntimeError("The LFR file header is invalid.")
# Read first bytes to skip header
self._file.read(SIZE_LENGTH)
def _find_chunks(self):
"""
Gets start position and size of data chunks in file.
"""
chunk_header = b"\x89LFC\x0d\x0a\x1a\x0a\x00\x00\x00\x00"
for i in range(0, DATA_CHUNKS_ILLUM):
data_pos, size, sha1 = self._get_chunk(chunk_header)
self._chunks[sha1] = (data_pos, size)
def _find_meta(self):
"""
Gets a data chunk that contains information over content
of other data chunks.
"""
meta_header = b"\x89LFM\x0d\x0a\x1a\x0a\x00\x00\x00\x00"
data_pos, size, sha1 = self._get_chunk(meta_header)
# Get content
self._file.seek(data_pos, 0)
data = self._file.read(size)
self._content = json.loads(data.decode("ASCII"))
def _get_chunk(self, header):
"""
Checks if chunk has correct header and skips it.
Finds start position and length of next chunk and reads
sha1-string that identifies the following data chunk.
Parameters
----------
header : bytes
Byte string that identifies start of chunk.
Returns
-------
data_pos : int
Start position of data chunk in file.
size : int
Size of data chunk.
sha1 : str
Sha1 value of chunk.
"""
# Read and check header of chunk
header_chunk = self._file.read(HEADER_LENGTH)
if header_chunk != header:
raise RuntimeError("The LFR chunk header is invalid.")
data_pos = None
sha1 = None
# Read size
size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
if size > 0:
# Read sha1
sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
# Skip fixed null chars
self._file.read(PADDING_LENGTH)
# Find start of data and skip data
data_pos = self._file.tell()
self._file.seek(size, 1)
# Skip extra null chars
ch = self._file.read(1)
while ch == b"\0":
ch = self._file.read(1)
self._file.seek(-1, 1)
return data_pos, size, sha1
def _get_data(self, index):
# Return the data and meta data for the given index
if index not in [0, None]:
raise IndexError("Lytro lfr file contains only one dataset")
if not self._meta_only:
# Read bytes from string and convert to uint16
raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(
np.uint16
)
im = LytroIllumRawFormat.rearrange_bits(raw)
else:
im = np.array([])
# Return array and dummy meta data
return im, self.metadata
def _get_meta_data(self, index):
# Get the meta data for the given index. If index is None,
# it returns the global meta data.
if index not in [0, None]:
raise IndexError("Lytro meta data file contains only one dataset")
return self.metadata
| LytroLfrFormat |
python | tornadoweb__tornado | tornado/test/escape_test.py | {
"start": 7550,
"end": 12330
} | class ____(unittest.TestCase):
def test_linkify(self):
for text, kwargs, html in linkify_tests:
linked = tornado.escape.linkify(text, **kwargs)
self.assertEqual(linked, html)
def test_xhtml_escape(self):
tests = [
("<foo>", "<foo>"),
("<foo>", "<foo>"),
(b"<foo>", b"<foo>"),
("<>&\"'", "<>&"'"),
("&", "&amp;"),
("<\u00e9>", "<\u00e9>"),
(b"<\xc3\xa9>", b"<\xc3\xa9>"),
] # type: List[Tuple[Union[str, bytes], Union[str, bytes]]]
for unescaped, escaped in tests:
self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
def test_xhtml_unescape_numeric(self):
tests = [
("foo bar", "foo bar"),
("foo bar", "foo bar"),
("foo bar", "foo bar"),
("foo઼bar", "foo\u0abcbar"),
("foo&#xyz;bar", "foo&#xyz;bar"), # invalid encoding
("foo&#;bar", "foo&#;bar"), # invalid encoding
("foo&#x;bar", "foo&#x;bar"), # invalid encoding
]
for escaped, unescaped in tests:
self.assertEqual(unescaped, xhtml_unescape(escaped))
def test_url_escape_unicode(self):
tests = [
# byte strings are passed through as-is
("\u00e9".encode(), "%C3%A9"),
("\u00e9".encode("latin1"), "%E9"),
# unicode strings become utf8
("\u00e9", "%C3%A9"),
] # type: List[Tuple[Union[str, bytes], str]]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
tests = [
("%C3%A9", "\u00e9", "utf8"),
("%C3%A9", "\u00c3\u00a9", "latin1"),
("%C3%A9", utf8("\u00e9"), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# characters, but make sure the function accepts both byte
# and unicode strings.
self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
unescaped = "+ #%"
plus_escaped = "%2B+%23%25"
escaped = "%2B%20%23%25"
self.assertEqual(url_escape(unescaped), plus_escaped)
self.assertEqual(url_escape(unescaped, plus=False), escaped)
self.assertEqual(url_unescape(plus_escaped), unescaped)
self.assertEqual(url_unescape(escaped, plus=False), unescaped)
self.assertEqual(url_unescape(plus_escaped, encoding=None), utf8(unescaped))
self.assertEqual(
url_unescape(escaped, encoding=None, plus=False), utf8(unescaped)
)
def test_escape_return_types(self):
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
self.assertEqual(type(xhtml_escape("foo")), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
self.assertEqual(json_decode(b'"foo"'), "foo")
self.assertEqual(json_decode('"foo"'), "foo")
# Non-ascii bytes are interpreted as utf8
self.assertEqual(json_decode(utf8('"\u00e9"')), "\u00e9")
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
self.assertEqual(json_decode(json_encode("\u00e9")), "\u00e9")
if bytes is str:
self.assertEqual(json_decode(json_encode(utf8("\u00e9"))), "\u00e9")
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
def test_squeeze(self):
self.assertEqual(
squeeze("sequences of whitespace chars"),
"sequences of whitespace chars",
)
def test_recursive_unicode(self):
tests = {
"dict": {b"foo": b"bar"},
"list": [b"foo", b"bar"],
"tuple": (b"foo", b"bar"),
"bytes": b"foo",
}
self.assertEqual(recursive_unicode(tests["dict"]), {"foo": "bar"})
self.assertEqual(recursive_unicode(tests["list"]), ["foo", "bar"])
self.assertEqual(recursive_unicode(tests["tuple"]), ("foo", "bar"))
self.assertEqual(recursive_unicode(tests["bytes"]), "foo")
| EscapeTestCase |
python | doocs__leetcode | solution/2300-2399/2353.Design a Food Rating System/Solution.py | {
"start": 0,
"end": 849
} | class ____:
def __init__(self, foods: List[str], cuisines: List[str], ratings: List[int]):
self.d = defaultdict(SortedList)
self.g = {}
for food, cuisine, rating in zip(foods, cuisines, ratings):
self.d[cuisine].add((-rating, food))
self.g[food] = (rating, cuisine)
def changeRating(self, food: str, newRating: int) -> None:
oldRating, cuisine = self.g[food]
self.g[food] = (newRating, cuisine)
self.d[cuisine].remove((-oldRating, food))
self.d[cuisine].add((-newRating, food))
def highestRated(self, cuisine: str) -> str:
return self.d[cuisine][0][1]
# Your FoodRatings object will be instantiated and called as such:
# obj = FoodRatings(foods, cuisines, ratings)
# obj.changeRating(food,newRating)
# param_2 = obj.highestRated(cuisine)
| FoodRatings |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 467526,
"end": 468441
} | class ____(UnopNode):
# unary '-' operator
operator = '-'
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
if self.type.is_complex:
self.infix = False
def py_operation_function(self, code):
return "PyNumber_Negative"
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
else:
return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
def get_constant_c_result_code(self):
value = self.operand.get_constant_c_result_code()
if value:
return "(-%s)" % value
| UnaryMinusNode |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 71464,
"end": 71617
} | class ____:
xlShiftDown = -4121 # from enum XlInsertShiftDirection
xlShiftToRight = -4161 # from enum XlInsertShiftDirection
| InsertShiftDirection |
python | spyder-ide__spyder | spyder/plugins/workingdirectory/confpage.py | {
"start": 574,
"end": 4665
} | class ____(PluginConfigPage):
def setup_page(self):
about_label = QLabel(
_("This is the directory that will be set as the default for "
"the IPython console and Files panes.")
)
about_label.setWordWrap(True)
# Startup directory
startup_group = QGroupBox(_("Startup"))
startup_bg = QButtonGroup(startup_group)
startup_label = QLabel(
_("At startup, the working directory is:")
)
startup_label.setWordWrap(True)
lastdir_radio = self.create_radiobutton(
_("The project (if open) or user home directory"),
'startup/use_project_or_home_directory',
tip=_("The startup working dir will be root of the "
"current project if one is open, otherwise the "
"user home directory"),
button_group=startup_bg
)
thisdir_radio = self.create_radiobutton(
_("The following directory:"),
'startup/use_fixed_directory',
_("At startup, the current working directory will be the "
"specified path"),
button_group=startup_bg
)
thisdir_bd = self.create_browsedir(
"",
'startup/fixed_directory',
getcwd_or_home()
)
thisdir_radio.radiobutton.toggled.connect(thisdir_bd.setEnabled)
lastdir_radio.radiobutton.toggled.connect(thisdir_bd.setDisabled)
thisdir_layout = QHBoxLayout()
thisdir_layout.addWidget(thisdir_radio)
thisdir_layout.addWidget(thisdir_bd)
startup_layout = QVBoxLayout()
startup_layout.addWidget(startup_label)
startup_layout.addWidget(lastdir_radio)
startup_layout.addLayout(thisdir_layout)
startup_group.setLayout(startup_layout)
# Console Directory
console_group = QGroupBox(_("New consoles"))
console_label = QLabel(
_("The working directory for new IPython consoles is:")
)
console_label.setWordWrap(True)
console_bg = QButtonGroup(console_group)
console_project_radio = self.create_radiobutton(
_("The project (if open) or user home directory"),
'console/use_project_or_home_directory',
tip=_("The working dir for new consoles will be root of the "
"project if one is open, otherwise the user home directory"),
button_group=console_bg
)
console_cwd_radio = self.create_radiobutton(
_("The working directory of the current console"),
'console/use_cwd',
button_group=console_bg
)
console_dir_radio = self.create_radiobutton(
_("The following directory:"),
'console/use_fixed_directory',
_("The directory when a new console is open will be the "
"specified path"),
button_group=console_bg
)
console_dir_bd = self.create_browsedir(
"",
'console/fixed_directory',
getcwd_or_home()
)
console_dir_radio.radiobutton.toggled.connect(console_dir_bd.setEnabled)
console_project_radio.radiobutton.toggled.connect(console_dir_bd.setDisabled)
console_cwd_radio.radiobutton.toggled.connect(console_dir_bd.setDisabled)
console_dir_layout = QHBoxLayout()
console_dir_layout.addWidget(console_dir_radio)
console_dir_layout.addWidget(console_dir_bd)
console_layout = QVBoxLayout()
console_layout.addWidget(console_label)
console_layout.addWidget(console_project_radio)
console_layout.addWidget(console_cwd_radio)
console_layout.addLayout(console_dir_layout)
console_group.setLayout(console_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(about_label)
vlayout.addSpacing(10)
vlayout.addWidget(startup_group)
vlayout.addWidget(console_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
| WorkingDirectoryConfigPage |
python | dagster-io__dagster | examples/docs_projects/project_components_pdf_extraction/project_components_pdf_extraction/lib/pdf_extraction.py | {
"start": 1885,
"end": 10923
} | class ____(dg.Component, dg.Resolvable):
"""A component for extracting and validating text from PDF documents.
This component provides a complete PDF text extraction pipeline that:
1. Converts PDF pages to high-quality images
2. Performs OCR text extraction using Tesseract
3. Validates extraction quality using OpenAI
4. Generates detailed extraction reports and metrics
The component creates three main assets:
- {pdf_name}_convert_to_image: Converts PDF pages to images
- {pdf_name}_extract_text: Extracts text from the images using OCR
- Asset check on extract_text: Validates extraction quality using OpenAI
Configuration:
pdf_path (str): Path to the PDF document to process
output_dir (str): Directory for storing extracted images and text
asset_specs (Sequence[ResolvedAssetSpec]): Asset specifications for the pipeline
language (str): OCR language code (e.g., 'eng' for English)
dpi (int): DPI resolution for PDF to image conversion
openai_api_key (str): OpenAI API key for validation
openai_model (str): OpenAI model to use for validation
validation_score (int): Minimum acceptable validation score (1-10)
"""
pdf_dir: str
output_dir: str
asset_specs: Sequence[dg.ResolvedAssetSpec]
validation_score: int = 7
language: str = "eng"
dpi: int = 300
openai_model: str = "gpt-4o-mini"
def _normalize_key(self, key: str) -> str:
return key.replace("-", "_")
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
# Initialize pdf_paths as an empty list
pdf_paths = []
# Get all PDF files from the directory
if os.path.isdir(self.pdf_dir):
pdf_files = [f for f in os.listdir(self.pdf_dir) if f.lower().endswith(".pdf")]
pdf_paths = [os.path.join(self.pdf_dir, pdf) for pdf in pdf_files]
# Create single shared PDF extractor resource
pdf_extractor_resource = PDFTextExtractor(
language=self.language,
dpi=self.dpi,
openai_api_key=dg.EnvVar("OPENAI_API_KEY"),
preprocess=True,
output_dir=self.output_dir, # Base output directory
)
assets = []
asset_checks = []
all_jobs = []
# Create assets for each PDF file
for pdf_path in pdf_paths:
key_prefix = self._normalize_key(f"{Path(pdf_path).stem}")
# Create convert_to_image asset with captured pdf_path
def _make_convert_to_image(pdf_path=pdf_path, key_prefix=key_prefix):
@dg.asset(
name=f"{key_prefix}_convert_to_image",
group_name="pdf_extraction",
)
def convert_to_image(
context: dg.AssetExecutionContext, pdf_extractor: PDFTextExtractor
):
"""Convert PDF to images, one per page."""
return pdf_extractor.convert_pdf_to_images(
pdf_path, output_folder=os.path.join(self.output_dir, key_prefix)
)
return convert_to_image
# Create extract_text asset with captured key_prefix
def _make_extract_text(key_prefix=key_prefix, pdf_path=pdf_path):
@dg.asset(
name=f"{key_prefix}_extract_text",
deps=[f"{key_prefix}_convert_to_image"],
group_name="pdf_extraction",
)
def extract_text(
context: dg.AssetExecutionContext, pdf_extractor: PDFTextExtractor
):
"""Extract text from the converted images using OCR."""
context.log.info(f"Extracting text for PDF: {key_prefix}")
# Define the output directory and ensure it exists
pdf_output_dir = os.path.join(self.output_dir, key_prefix)
# Extract text and save directly to output directory
extraction_result = pdf_extractor.extract_text_from_images(key_prefix)
# Format the extracted text as markdown
markdown_text = f"# Extracted Text from {Path(pdf_path).name}\n\n"
for page in extraction_result["pages"]:
markdown_text += f"## Page {page['page']}\n\n"
markdown_text += f"{page['text']}\n\n"
# Save markdown version to output directory
markdown_file = os.path.join(pdf_output_dir, "extracted_text.md")
with open(markdown_file, "w", encoding="utf-8") as f:
f.write(markdown_text)
return dg.Output(
value=extraction_result,
metadata={
"text": dg.MetadataValue.md(markdown_text),
"total_pages": dg.MetadataValue.int(extraction_result["total_pages"]),
"file_name": dg.MetadataValue.text(extraction_result["file"]),
"output_directory": dg.MetadataValue.path(pdf_output_dir),
"markdown_file": dg.MetadataValue.path(markdown_file),
"text_file": dg.MetadataValue.path(
os.path.join(pdf_output_dir, "extracted_text.txt")
),
},
)
return extract_text
# Create asset check with captured key_prefix
def _make_check_extraction_quality(key_prefix=key_prefix):
@dg.asset_check(asset=f"{key_prefix}_extract_text")
def check_extraction_quality(pdf_extractor: PDFTextExtractor):
"""Validate the extracted text quality using OpenAI."""
validation_result = pdf_extractor.validate_purchase_order(
key_prefix,
expected_fields=["document content", "text quality", "completeness"],
)
if not validation_result.get("validation_performed", False):
return dg.AssetCheckResult(
passed=False,
metadata={
"error": validation_result.get("error", "Unknown validation error")
},
)
ocr_quality_score = validation_result.get("ocr_quality", 0)
passed = ocr_quality_score >= self.validation_score
return dg.AssetCheckResult(
passed=passed,
metadata={
"ocr_quality_score": dg.MetadataValue.int(ocr_quality_score),
"identified_errors": dg.MetadataValue.json(
validation_result.get("identified_errors", [])
),
"key_information_found": dg.MetadataValue.json(
validation_result.get("key_information_found", [])
),
"headers_found": dg.MetadataValue.json(
validation_result.get("headers_found", {})
),
"missing_sections": dg.MetadataValue.json(
validation_result.get("missing_sections", [])
),
"is_complete_po": dg.MetadataValue.bool(
validation_result.get("is_complete_po", False)
),
},
)
return check_extraction_quality
# Create job for this PDF
pdf_job = dg.define_asset_job(
name=f"{key_prefix}_extraction_job",
selection=[f"{key_prefix}_convert_to_image", f"{key_prefix}_extract_text"],
)
# Add assets, checks, and job to their respective lists
assets.extend([_make_convert_to_image(), _make_extract_text()])
asset_checks.append(_make_check_extraction_quality())
all_jobs.append(pdf_job)
# Create a job that processes all PDFs
asset_names = []
for key_prefix in [self._normalize_key(Path(pdf).stem) for pdf in pdf_paths]:
asset_names.extend([f"{key_prefix}_convert_to_image", f"{key_prefix}_extract_text"])
all_pdfs_job = dg.define_asset_job(name="process_all_pdfs", selection=asset_names)
all_jobs.append(all_pdfs_job)
return dg.Definitions(
assets=assets,
asset_checks=asset_checks,
jobs=all_jobs,
resources={
"pdf_extractor": pdf_extractor_resource,
},
)
| PdfExtraction |
python | gevent__gevent | src/greentest/3.10/test_asyncore.py | {
"start": 26091,
"end": 26182
} | class ____(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
| TestAPI_UseIPv4Poll |
python | pytorch__pytorch | torch/nn/utils/_expanded_weights/linear_expanded_weights.py | {
"start": 327,
"end": 2259
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, _, __, *expanded_args_and_kwargs):
if len(expanded_args_and_kwargs[0].shape) <= 1:
raise RuntimeError(
"Input does not have a batch dimension. Expanded Weights expected input "
f"of at least rank 2, got of rank {len(expanded_args_and_kwargs[0].shape)}"
)
expanded_kwargs = {
"bias": expanded_args_and_kwargs[2]
if len(expanded_args_and_kwargs) == 3
else None
}
expanded_args = expanded_args_and_kwargs[:2]
ctx.batch_first = is_batch_first(expanded_args_and_kwargs)
output = forward_helper(F.linear, expanded_args, expanded_kwargs)
ctx.args = expanded_args
ctx.kwargs = expanded_kwargs
return output
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
input, weight = ctx.args
bias = ctx.kwargs["bias"]
results: list[torch.Tensor | None] = []
results.append(None) # for kwarg_names
results.append(None) # for op reference
if input.requires_grad:
results.append(grad_output.matmul(unpack_expanded_weight_or_tensor(weight)))
else:
results.append(None)
results.extend([None] * 2) # weight and bias don't compute batched gradients
if not ctx.batch_first:
grad_output = grad_output.transpose(0, 1)
input = input.transpose(0, 1)
# weight and bias get their grad_sample fields set directly if they exist
set_grad_sample_if_exists(
weight, lambda _: torch.einsum("n...i,n...j->nij", grad_output, input)
)
set_grad_sample_if_exists(
bias, lambda _: torch.einsum("n...k->nk", grad_output)
)
return tuple(results)
| LinearPerSampleGrad |
python | langchain-ai__langchain | libs/partners/chroma/tests/integration_tests/fake_embeddings.py | {
"start": 987,
"end": 2002
} | class ____(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text)),
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
| ConsistentFakeEmbeddings |
python | anthropics__anthropic-sdk-python | tests/api_resources/beta/test_models.py | {
"start": 445,
"end": 3708
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_retrieve(self, client: Anthropic) -> None:
model = client.beta.models.retrieve(
model_id="model_id",
)
assert_matches_type(BetaModelInfo, model, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: Anthropic) -> None:
model = client.beta.models.retrieve(
model_id="model_id",
betas=["string"],
)
assert_matches_type(BetaModelInfo, model, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: Anthropic) -> None:
response = client.beta.models.with_raw_response.retrieve(
model_id="model_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = response.parse()
assert_matches_type(BetaModelInfo, model, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: Anthropic) -> None:
with client.beta.models.with_streaming_response.retrieve(
model_id="model_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = response.parse()
assert_matches_type(BetaModelInfo, model, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: Anthropic) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
client.beta.models.with_raw_response.retrieve(
model_id="",
)
@parametrize
def test_method_list(self, client: Anthropic) -> None:
model = client.beta.models.list()
assert_matches_type(SyncPage[BetaModelInfo], model, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: Anthropic) -> None:
model = client.beta.models.list(
after_id="after_id",
before_id="before_id",
limit=1,
betas=["string"],
)
assert_matches_type(SyncPage[BetaModelInfo], model, path=["response"])
@parametrize
def test_raw_response_list(self, client: Anthropic) -> None:
response = client.beta.models.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = response.parse()
assert_matches_type(SyncPage[BetaModelInfo], model, path=["response"])
@parametrize
def test_streaming_response_list(self, client: Anthropic) -> None:
with client.beta.models.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = response.parse()
assert_matches_type(SyncPage[BetaModelInfo], model, path=["response"])
assert cast(Any, response.is_closed) is True
| TestModels |
python | astropy__astropy | astropy/wcs/wcsapi/fitswcs.py | {
"start": 6459,
"end": 33556
} | class ____(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API.
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if all(i == 0 for i in self._naxis):
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = self.naxis * [0]
else:
if len(value) != self.naxis:
raise ValueError(
f"The number of data axes, {self.naxis}, does not equal the shape"
f" {len(value)}."
)
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError(
"The number of data axes, "
f"{self.naxis}, does not equal the number of "
f"pixel bounds {len(value)}."
)
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(("UT(", "TT(")):
types.append("time")
else:
ctype_name = ctype.split("-")[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ""
elif isinstance(unit, u.Unit):
unit = unit.to_string(format="vounit")
else:
try:
unit = u.Unit(unit).to_string(format="vounit")
except u.UnitsError:
unit = ""
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def _out_of_bounds_to_nan(self, pixel_arrays):
if self.pixel_bounds is not None:
pixel_arrays = list(pixel_arrays)
for idim in range(self.pixel_n_dim):
if self.pixel_bounds[idim] is None:
continue
out_of_bounds = (pixel_arrays[idim] < self.pixel_bounds[idim][0]) | (
pixel_arrays[idim] > self.pixel_bounds[idim][1]
)
if np.any(out_of_bounds):
pix = pixel_arrays[idim]
if np.isscalar(pix):
pix = np.nan
else:
pix = pix.astype(float, copy=True)
pix[out_of_bounds] = np.nan
pixel_arrays[idim] = pix
return pixel_arrays
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = self._out_of_bounds_to_nan(pixel_arrays)
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(
lambda *args: e.best_solution, "input", *world_arrays, 0
)
pixel = self._out_of_bounds_to_nan(pixel)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (
self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.specsys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat,
)
# If the cache is present, we need to check that the 'hash' matches.
if (cache := getattr(self, "_components_and_classes_cache", None)) is not None:
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.coordinates import EarthLocation, SkyCoord, StokesCoord
from astropy.time import Time, TimeDelta
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.wcs.utils import wcs_to_celestial_frame
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
celestial_frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
celestial_frame = None
else:
kwargs = {}
kwargs["frame"] = celestial_frame
# Very occasionally (i.e. with TAB) wcs does not convert the units to degrees
kwargs["unit"] = (
u.Unit(self.wcs.cunit[self.wcs.lng]),
u.Unit(self.wcs.cunit[self.wcs.lat]),
)
classes["celestial"] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ("celestial", 0, "spherical.lon.degree")
components[self.wcs.lat] = ("celestial", 1, "spherical.lat.degree")
# Next, we check for spectral components
if self.has_spectral:
# Find index of spectral coordinate
ispec = self.wcs.spec
ctype = self.wcs.ctype[ispec][:4]
ctype = ctype.upper()
kwargs = {}
# Determine observer location and velocity
# TODO: determine how WCS standard would deal with observer on a
# spacecraft far from earth. For now assume the obsgeo parameters,
# if present, give the geocentric observer location.
if np.isnan(self.wcs.obsgeo[0]):
observer = None
else:
earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
# Get the time scale from TIMESYS or fall back to 'utc'
tscale = self.wcs.timesys.lower() or "utc"
if np.isnan(self.wcs.mjdavg):
obstime = Time(
self.wcs.mjdobs,
format="mjd",
scale=tscale,
location=earth_location,
)
else:
obstime = Time(
self.wcs.mjdavg,
format="mjd",
scale=tscale,
location=earth_location,
)
observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))
if self.wcs.specsys in VELOCITY_FRAMES:
frame = VELOCITY_FRAMES[self.wcs.specsys]
observer = observer_location.transform_to(frame)
if isinstance(frame, str):
observer = attach_zero_velocities(observer)
else:
observer = update_differentials_to_match(
observer_location,
VELOCITY_FRAMES[self.wcs.specsys],
preserve_observer_frame=True,
)
elif self.wcs.specsys == "TOPOCENT":
observer = attach_zero_velocities(observer_location)
else:
raise NotImplementedError(
f"SPECSYS={self.wcs.specsys} not yet supported"
)
# Determine target
# This is tricker. In principle the target for each pixel is the
# celestial coordinates of the pixel, but we then need to be very
# careful about SSYSOBS which is tricky. For now, we set the
# target using the reference celestial coordinate in the WCS (if
# any).
if self.has_celestial and celestial_frame is not None:
# NOTE: celestial_frame was defined higher up
# NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
target = SkyCoord(
self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
frame=celestial_frame,
distance=1000 * u.kpc,
)
target = attach_zero_velocities(target)
else:
target = None
# SpectralCoord does not work properly if either observer or target
# are not convertible to ICRS, so if this is the case, we (for now)
# drop the observer and target from the SpectralCoord and warn the
# user.
if observer is not None:
try:
observer.transform_to(ICRS())
except Exception:
warnings.warn(
"observer cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
observer = None
if target is not None:
try:
target.transform_to(ICRS())
except Exception:
warnings.warn(
"target cannot be converted to ICRS, so will "
"not be set on SpectralCoord",
AstropyUserWarning,
)
target = None
# NOTE: below we include Quantity in classes['spectral'] instead
# of SpectralCoord - this is because we want to also be able to
# accept plain quantities.
if ctype == "ZOPT":
def spectralcoord_from_redshift(redshift):
if isinstance(redshift, SpectralCoord):
return redshift
return SpectralCoord(
(redshift + 1) * self.wcs.restwav,
unit=u.m,
observer=observer,
target=target,
)
def redshift_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.0
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m)
/ self.wcs.restwav
- 1.0
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_redshift)
components[self.wcs.spec] = ("spectral", 0, redshift_from_spectralcoord)
elif ctype == "BETA":
def spectralcoord_from_beta(beta):
if isinstance(beta, SpectralCoord):
return beta
return SpectralCoord(
beta * C_SI,
unit=u.m / u.s,
doppler_convention="relativistic",
doppler_rest=self.wcs.restwav * u.m,
observer=observer,
target=target,
)
def beta_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
else:
return (
spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(u.m / u.s, doppler_equiv)
/ C_SI
)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_beta)
components[self.wcs.spec] = ("spectral", 0, beta_from_spectralcoord)
else:
kwargs["unit"] = self.wcs.cunit[ispec]
# Make sure that if restfrq is defined and restwav is not or
# vice-versa, we define the other one. Typically if e.g.
# RESTFRQ is defined in the original FITS header, wcs.restwav
# is 0.
if ctype in ("VELO", "VRAD", "VOPT"):
restfrq = self.wcs.restfrq
restwav = self.wcs.restwav
if restfrq > 0 or restwav > 0:
if restwav == 0:
restfrq = u.Quantity(restfrq, u.Hz)
restwav = restfrq.to(u.m, u.spectral())
elif restfrq == 0:
restwav = u.Quantity(restwav, u.m)
restfrq = restwav.to(u.Hz, u.spectral())
else:
restfrq = u.Quantity(restfrq, u.Hz)
restwav = u.Quantity(restwav, u.m)
restfrq_derived = restwav.to(u.Hz, u.spectral())
if not quantity_allclose(
restfrq, restfrq_derived, rtol=1e-4
):
used = "restwav" if ctype == "VOPT" else "restfrq"
warnings.warn(
f"restfrq={restfrq} and restwav={restwav}={restfrq_derived} "
f"are not consistent to rtol=1e-4, choosing {used}. In future, "
f"this will raise an exception.",
AstropyDeprecationWarning,
)
if ctype == "VELO":
kwargs["doppler_convention"] = "relativistic"
kwargs["doppler_rest"] = restfrq
elif ctype == "VRAD":
kwargs["doppler_convention"] = "radio"
kwargs["doppler_rest"] = restfrq
elif ctype == "VOPT":
kwargs["doppler_convention"] = "optical"
kwargs["doppler_rest"] = restwav
def spectralcoord_from_value(value):
if isinstance(value, SpectralCoord):
return value
return SpectralCoord(
value, observer=observer, target=target, **kwargs
)
def value_from_spectralcoord(spectralcoord):
# TODO: check target is consistent between WCS and SpectralCoord,
# if they are not the transformation doesn't make conceptual sense.
if (
observer is None
or spectralcoord.observer is None
or spectralcoord.target is None
):
if observer is None:
msg = "No observer defined on WCS"
elif spectralcoord.observer is None:
msg = "No observer defined on SpectralCoord"
else:
msg = "No target defined on SpectralCoord"
warnings.warn(
f"{msg}, SpectralCoord "
"will be converted without any velocity "
"frame change",
AstropyUserWarning,
)
return spectralcoord.to_value(**kwargs)
else:
return spectralcoord.with_observer_stationary_relative_to(
observer
).to_value(**kwargs)
classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_value)
components[self.wcs.spec] = ("spectral", 0, value_from_spectralcoord)
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
if "time" in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count("time") > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == "time":
if multiple_time:
name = f"time.{i}"
else:
name = "time"
# Initialize delta
reference_time_delta = None
# Extract time scale, and remove any algorithm code
scale = self.wcs.ctype[i].split("-")[0].lower()
if scale == "time":
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = "utc"
# Drop sub-scales
if "(" in scale:
pos = scale.index("(")
scale, subscale = scale[:pos], scale[pos + 1 : -1]
warnings.warn(
"Dropping unsupported sub-scale "
f"{subscale.upper()} from scale {scale.upper()}",
UserWarning,
)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == "gps":
reference_time_delta = TimeDelta(19, format="sec")
scale = "tai"
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f"Unrecognized time CTYPE={self.wcs.ctype[i]}")
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith("topocent"):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn(
"Missing or incomplete observer location "
"information, setting location in Time to None",
UserWarning,
)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == "geocenter":
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == "":
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(
f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None",
UserWarning,
)
location = None
reference_time = Time(
np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format="mjd",
scale=scale,
location=location,
)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
def time_from_reference_and_offset(offset):
if isinstance(offset, Time):
return offset
return reference_time + TimeDelta(offset, format="sec")
def offset_from_time_and_reference(time):
return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
if "phys.polarization.stokes" in self.world_axis_physical_types:
for i in range(self.naxis):
if self.world_axis_physical_types[i] == "phys.polarization.stokes":
name = "stokes"
classes[name] = (StokesCoord, (), {})
components[i] = (name, 0, "value")
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split("-")[0].lower()
if name == "":
name = "world"
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {"unit": self.wcs.cunit[i]})
components[i] = (name, 0, "value")
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
| FITSWCSAPIMixin |
python | davidhalter__jedi | test/completion/pep0484_generic_passthroughs.py | {
"start": 6005,
"end": 6872
} | class ____:
def __call__(self, func: TCallable) -> TCallable:
...
#? class_decorator_factory_bound_callable()
class_decorator_factory_bound_callable()
#? Callable()
class_decorator_factory_bound_callable()()
is_decorated_by_class_bound_factory = class_decorator_factory_bound_callable()(will_be_decorated)
#? will_be_decorated
is_decorated_by_class_bound_factory
#? ['the_param=']
is_decorated_by_class_bound_factory(the_par
)
def decorator_factory_bound_callable() -> Callable[[TCallable], TCallable]:
pass
#? Callable()
decorator_factory_bound_callable()
#? Callable()
decorator_factory_bound_callable()()
is_decorated_by_bound_factory = decorator_factory_bound_callable()(will_be_decorated)
#? will_be_decorated
is_decorated_by_bound_factory
#? ['the_param=']
is_decorated_by_bound_factory(the_par
)
| class_decorator_factory_bound_callable |
python | modin-project__modin | modin/core/storage_formats/pandas/query_compiler.py | {
"start": 7903,
"end": 187466
} | class ____(BaseQueryCompiler):
"""
Query compiler for the pandas storage format.
This class translates common query compiler API into the DataFrame Algebra
queries, that is supposed to be executed by :py:class:`~modin.core.dataframe.pandas.dataframe.dataframe.PandasDataframe`.
Parameters
----------
modin_frame : PandasDataframe
Modin Frame to query with the compiled queries.
shape_hint : {"row", "column", None}, default: None
Shape hint for frames known to be a column or a row, otherwise None.
"""
_modin_frame: PandasDataframe
_shape_hint: Optional[str]
def __init__(self, modin_frame: PandasDataframe, shape_hint: Optional[str] = None):
self._modin_frame = modin_frame
self._shape_hint = shape_hint
storage_format = property(lambda self: self._modin_frame.storage_format)
engine = property(lambda self: self._modin_frame.engine)
@property
def lazy_row_labels(self):
"""
Whether the row labels are computed lazily.
Equivalent to `not self.frame_has_materialized_index`.
Returns
-------
bool
"""
return not self.frame_has_materialized_index
@property
def lazy_row_count(self):
"""
Whether the row count is computed lazily.
Equivalent to `not self.frame_has_materialized_index`.
Returns
-------
bool
"""
return not self.frame_has_materialized_index
@property
def lazy_column_types(self):
"""
Whether the dtypes are computed lazily.
Equivalent to `not self.frame_has_materialized_dtypes`.
Returns
-------
bool
"""
return not self.frame_has_materialized_dtypes
@property
def lazy_column_labels(self):
"""
Whether the column labels are computed lazily.
Equivalent to `not self.frame_has_materialized_columns`.
Returns
-------
bool
"""
return not self.frame_has_materialized_columns
@property
def lazy_column_count(self):
"""
Whether the column count is are computed lazily.
Equivalent to `not self.frame_has_materialized_columns`.
Returns
-------
bool
"""
return not self.frame_has_materialized_columns
# The default implementation of stay_cost will cache some information
# which will violate some assumptions in test_internals. Since this class
# is only used for non-hybrid operations we simply return 0 here for now.
def stay_cost(self, api_cls_name, operation, arguments):
return 0
def finalize(self):
self._modin_frame.finalize()
def execute(self):
self.finalize()
self._modin_frame.wait_computations()
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
# Dataframe exchange protocol
def to_interchange_dataframe(
self, nan_as_null: bool = False, allow_copy: bool = True
):
return self._modin_frame.__dataframe__(
nan_as_null=nan_as_null, allow_copy=allow_copy
)
@classmethod
def from_interchange_dataframe(cls, df: ProtocolDataframe, data_cls):
return cls(data_cls.from_interchange_dataframe(df))
# END Dataframe exchange protocol
index: pandas.Index = property(_get_axis(0), _set_axis(0))
columns: pandas.Index = property(_get_axis(1), _set_axis(1))
def get_axis_len(self, axis: Literal[0, 1]) -> int:
"""
Return the length of the specified axis.
Parameters
----------
axis : {0, 1}
Axis to return labels on.
Returns
-------
int
"""
if axis == 0:
return len(self._modin_frame)
else:
return sum(self._modin_frame.column_widths)
@property
def dtypes(self) -> pandas.Series:
return self._modin_frame.dtypes
def get_dtypes_set(self):
return self._modin_frame.get_dtypes_set()
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
if axis == 1:
return self.__constructor__(
self._modin_frame.rename(new_col_labels=lambda x: f"{prefix}{x}")
)
else:
return self.__constructor__(
self._modin_frame.rename(new_row_labels=lambda x: f"{prefix}{x}")
)
def add_suffix(self, suffix, axis=1):
if axis == 1:
return self.__constructor__(
self._modin_frame.rename(new_col_labels=lambda x: f"{x}{suffix}")
)
else:
return self.__constructor__(
self._modin_frame.rename(new_row_labels=lambda x: f"{x}{suffix}")
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy(), self._shape_hint)
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def concat(self, axis, other, **kwargs):
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame.concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
# TODO create a way to clean up this object.
return
# END Data Management Methods
# Data Movement Methods
def move_to(self, target_backend: str) -> Union[BaseQueryCompiler, Any]:
return NotImplemented
@classmethod
def move_from(cls, source_qc: BaseQueryCompiler) -> Union[BaseQueryCompiler, Any]:
return NotImplemented
# END Data Movement Methods
# To NumPy
def to_numpy(self, **kwargs):
return self._modin_frame.to_numpy(**kwargs)
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
add = Binary.register(pandas.DataFrame.add, infer_dtypes="try_sample")
# 'combine' and 'combine_first' are working with UDFs, so it's better not so sample them
combine = Binary.register(pandas.DataFrame.combine, infer_dtypes="common_cast")
combine_first = Binary.register(
pandas.DataFrame.combine_first, infer_dtypes="common_cast"
)
eq = Binary.register(pandas.DataFrame.eq, infer_dtypes="bool")
equals = Binary.register(
lambda df, other: pandas.DataFrame([[df.equals(other)]]),
join_type=None,
labels="drop",
infer_dtypes="bool",
)
floordiv = Binary.register(pandas.DataFrame.floordiv, infer_dtypes="try_sample")
ge = Binary.register(pandas.DataFrame.ge, infer_dtypes="bool")
gt = Binary.register(pandas.DataFrame.gt, infer_dtypes="bool")
le = Binary.register(pandas.DataFrame.le, infer_dtypes="bool")
lt = Binary.register(pandas.DataFrame.lt, infer_dtypes="bool")
mod = Binary.register(pandas.DataFrame.mod, infer_dtypes="try_sample")
mul = Binary.register(pandas.DataFrame.mul, infer_dtypes="try_sample")
rmul = Binary.register(pandas.DataFrame.rmul, infer_dtypes="try_sample")
ne = Binary.register(pandas.DataFrame.ne, infer_dtypes="bool")
pow = Binary.register(pandas.DataFrame.pow, infer_dtypes="try_sample")
radd = Binary.register(pandas.DataFrame.radd, infer_dtypes="try_sample")
rfloordiv = Binary.register(pandas.DataFrame.rfloordiv, infer_dtypes="try_sample")
rmod = Binary.register(pandas.DataFrame.rmod, infer_dtypes="try_sample")
rpow = Binary.register(pandas.DataFrame.rpow, infer_dtypes="try_sample")
rsub = Binary.register(pandas.DataFrame.rsub, infer_dtypes="try_sample")
rtruediv = Binary.register(pandas.DataFrame.rtruediv, infer_dtypes="try_sample")
sub = Binary.register(pandas.DataFrame.sub, infer_dtypes="try_sample")
truediv = Binary.register(pandas.DataFrame.truediv, infer_dtypes="try_sample")
__and__ = Binary.register(pandas.DataFrame.__and__, infer_dtypes="bool")
__or__ = Binary.register(pandas.DataFrame.__or__, infer_dtypes="bool")
__rand__ = Binary.register(pandas.DataFrame.__rand__, infer_dtypes="bool")
__ror__ = Binary.register(pandas.DataFrame.__ror__, infer_dtypes="bool")
__rxor__ = Binary.register(pandas.DataFrame.__rxor__, infer_dtypes="bool")
__xor__ = Binary.register(pandas.DataFrame.__xor__, infer_dtypes="bool")
df_update = Binary.register(
copy_df_for_func(pandas.DataFrame.update, display_name="update"),
join_type="left",
sort=False,
)
series_update = Binary.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1)),
display_name="update",
),
join_type="left",
sort=False,
)
# Series logical operators take an additional fill_value flag that dataframe does not
series_eq = Binary.register(
_series_logical_binop(pandas.Series.eq), infer_dtypes="bool"
)
series_ge = Binary.register(
_series_logical_binop(pandas.Series.ge), infer_dtypes="bool"
)
series_gt = Binary.register(
_series_logical_binop(pandas.Series.gt), infer_dtypes="bool"
)
series_le = Binary.register(
_series_logical_binop(pandas.Series.le), infer_dtypes="bool"
)
series_lt = Binary.register(
_series_logical_binop(pandas.Series.lt), infer_dtypes="bool"
)
series_ne = Binary.register(
_series_logical_binop(pandas.Series.ne), infer_dtypes="bool"
)
# Needed for numpy API
_logical_and = Binary.register(
lambda df, other, *args, **kwargs: pandas.DataFrame(
np.logical_and(df, other, *args, **kwargs)
),
infer_dtypes="bool",
)
_logical_or = Binary.register(
lambda df, other, *args, **kwargs: pandas.DataFrame(
np.logical_or(df, other, *args, **kwargs)
),
infer_dtypes="bool",
)
_logical_xor = Binary.register(
lambda df, other, *args, **kwargs: pandas.DataFrame(
np.logical_xor(df, other, *args, **kwargs)
),
infer_dtypes="bool",
)
def where(self, cond, other, **kwargs):
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
# it's doesn't work if `other` is Series._query_compiler because
# `n_ary_op` performs columns copartition both for `cond` and `other`.
if isinstance(other, type(self)) and other._shape_hint is not None:
other = other.to_pandas()
if isinstance(other, type(self)):
# Make sure to set join_type=None so the `where` result always has
# the same row and column labels as `self`.
new_modin_frame = self._modin_frame.n_ary_op(
lambda df, cond, other: df.where(cond, other, **kwargs),
[
cond._modin_frame,
other._modin_frame,
],
join_type=None,
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame.n_ary_op(
where_builder_series, [cond._modin_frame], join_type="left"
)
return self.__constructor__(new_modin_frame)
def merge(self, right, **kwargs):
if RangePartitioning.get():
try:
return MergeImpl.range_partitioning_merge(self, right, kwargs)
except NotImplementedError as e:
message = (
f"Can't use range-partitioning merge implementation because of: {e}"
+ "\nFalling back to a row-axis implementation."
)
get_logger().info(message)
return MergeImpl.row_axis_merge(self, right, kwargs)
    def join(self, right: PandasQueryCompiler, **kwargs) -> PandasQueryCompiler:
        """
        Join columns of `right` onto `self` (pandas ``DataFrame.join`` semantics).

        For "left"/"inner" joins — and non-empty "right" joins, which are
        rewritten as a left join with swapped operands — the right frame is
        broadcast to every row partition of the left frame. Any other case
        defaults to pandas.
        """
        on = kwargs.get("on", None)
        how = kwargs.get("how", "left")
        sort = kwargs.get("sort", False)
        left = self

        if how in ["left", "inner"] or (
            how == "right" and right._modin_frame._partitions.size != 0
        ):
            reverted = False
            if how == "right":
                # Perform the right join as a left join with the operands swapped;
                # `map_func` restores the original argument order per partition.
                left, right = right, left
                reverted = True

            def map_func(
                left, right, kwargs=kwargs
            ) -> pandas.DataFrame:  # pragma: no cover
                if reverted:
                    df = pandas.DataFrame.join(right, left, **kwargs)
                else:
                    df = pandas.DataFrame.join(left, right, **kwargs)
                return df

            right_to_broadcast = right._modin_frame.combine()
            left = left.__constructor__(
                left._modin_frame.broadcast_apply_full_axis(
                    axis=1,
                    func=map_func,
                    # We're going to explicitly change the shape across the 1-axis,
                    # so we want for partitioning to adapt as well
                    keep_partitioning=False,
                    num_splits=merge_partitioning(
                        left._modin_frame, right._modin_frame, axis=1
                    ),
                    other=right_to_broadcast,
                )
            )
            return left.sort_rows_by_column_values(on) if sort else left
        else:
            return left.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
    def reindex(self, axis, labels, **kwargs):
        """
        Conform the frame along `axis` to the new `labels`.

        Parameters
        ----------
        axis : {0, 1}
            0 reindexes rows, 1 reindexes columns.
        labels : list-like
            The new labels for the given axis.
        **kwargs : dict
            Additional arguments forwarded to ``pandas.DataFrame.reindex``
            (e.g. ``method``, ``fill_value``).

        Returns
        -------
        PandasQueryCompiler
        """
        new_index, indexer = (self.index, None) if axis else self.index.reindex(labels)
        new_columns, _ = self.columns.reindex(labels) if axis else (self.columns, None)
        new_dtypes = None
        # Dtypes can only be precomputed for plain fills (no `method`).
        if self.frame_has_materialized_dtypes and kwargs.get("method", None) is None:
            # For columns, defining types is easier because we don't have to calculate the common
            # type, since the entire column is filled. A simple `reindex` covers our needs.
            # For rows, we can avoid calculating common types if we know that no new strings of
            # arbitrary type have been added (this information is in `indexer`).
            dtype = pandas.Index([kwargs.get("fill_value", np.nan)]).dtype
            if axis == 0:
                new_dtypes = self.dtypes.copy()
                # "-1" means that the required labels are missing in the dataframe and the
                # corresponding rows will be filled with "fill_value" that may change the column type.
                if indexer is not None and -1 in indexer:
                    for col, col_dtype in new_dtypes.items():
                        new_dtypes[col] = find_common_type((col_dtype, dtype))
            else:
                new_dtypes = self.dtypes.reindex(labels, fill_value=dtype)
        new_modin_frame = self._modin_frame.apply_full_axis(
            axis,
            lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
            new_index=new_index,
            new_columns=new_columns,
            dtypes=new_dtypes,
        )
        return self.__constructor__(new_modin_frame)
    def reset_index(self, **kwargs) -> PandasQueryCompiler:
        """
        Reset the index, inserting the old index level(s) as data columns unless ``drop=True``.

        Parameters
        ----------
        **kwargs : dict
            Arguments in the format of ``pandas.DataFrame.reset_index``
            (``drop``, ``level``, ``col_level``, ``col_fill``, ...).

        Returns
        -------
        PandasQueryCompiler
        """
        if self.lazy_row_labels:
            # Lazy path: avoid materializing the index — each partition resets
            # itself and, when everything was dropped, rebuilds a positional
            # RangeIndex from the partition lengths.

            def _reset(df, *axis_lengths, partition_idx):  # pragma: no cover
                df = df.reset_index(**kwargs)

                if isinstance(df.index, pandas.RangeIndex):
                    # If the resulting index is a pure RangeIndex that means that
                    # `.reset_index` actually dropped all of the levels of the
                    # original index and so we have to recompute it manually for each partition
                    start = sum(axis_lengths[:partition_idx])
                    stop = sum(axis_lengths[: partition_idx + 1])

                    df.index = pandas.RangeIndex(start, stop)
                return df

            new_columns = None
            if kwargs["drop"]:
                dtypes = self._modin_frame.copy_dtypes_cache()
                if self.frame_has_columns_cache:
                    new_columns = self._modin_frame.copy_columns_cache(
                        copy_lengths=True
                    )
            else:
                # concat index dtypes with column dtypes
                index_dtypes = self._modin_frame._index_cache.maybe_get_dtypes()
                try:
                    dtypes = ModinDtypes.concat(
                        [
                            index_dtypes,
                            self._modin_frame._dtypes,
                        ]
                    )
                except NotImplementedError:
                    # may raise on duplicated names in materialized 'self.dtypes'
                    dtypes = None
                if (
                    # can precompute new columns if we know columns and index names
                    self.frame_has_materialized_columns
                    and index_dtypes is not None
                ):
                    # Build a dummy one-row frame just to let pandas compute the
                    # resulting column labels of the reset.
                    empty_index = (
                        pandas.Index([0], name=index_dtypes.index[0])
                        if len(index_dtypes) == 1
                        else pandas.MultiIndex.from_arrays(
                            [[i] for i in range(len(index_dtypes))],
                            names=index_dtypes.index,
                        )
                    )
                    new_columns = (
                        pandas.DataFrame(columns=self.columns, index=empty_index)
                        .reset_index(**kwargs)
                        .columns
                    )

            return self.__constructor__(
                self._modin_frame.apply_full_axis(
                    axis=1,
                    func=_reset,
                    enumerate_partitions=True,
                    new_columns=new_columns,
                    dtypes=dtypes,
                    sync_labels=False,
                    pass_axis_lengths_to_partitions=True,
                )
            )

        allow_duplicates = kwargs.pop("allow_duplicates", lib.no_default)
        names = kwargs.pop("names", None)
        if allow_duplicates not in (lib.no_default, False) or names is not None:
            # These options are not supported by the distributed implementation.
            return self.default_to_pandas(
                pandas.DataFrame.reset_index,
                allow_duplicates=allow_duplicates,
                names=names,
                **kwargs,
            )
        drop = kwargs.get("drop", False)
        level = kwargs.get("level", None)
        new_index = None
        if level is not None:
            if not isinstance(level, (tuple, list)):
                level = [level]
            level = [self.index._get_level_number(lev) for lev in level]
            uniq_sorted_level = sorted(set(level))
            if len(uniq_sorted_level) < self.index.nlevels:
                # We handle this by separately computing the index. We could just
                # put the labels into the data and pull them back out, but that is
                # expensive.
                new_index = (
                    self.index.droplevel(uniq_sorted_level)
                    if len(level) < self.index.nlevels
                    else pandas.RangeIndex(len(self.index))
                )
        elif not drop:
            uniq_sorted_level = list(range(self.index.nlevels))
        if not drop:
            if len(uniq_sorted_level) < self.index.nlevels:
                # These are the index levels that will remain after the reset_index
                keep_levels = [
                    i for i in range(self.index.nlevels) if i not in uniq_sorted_level
                ]
                new_copy = self.copy()
                # Change the index to have only the levels that will be inserted
                # into the data. We will replace the old levels later.
                new_copy.index = self.index.droplevel(keep_levels)
                new_copy.index.names = [
                    (
                        "level_{}".format(level_value)
                        if new_copy.index.names[level_index] is None
                        else new_copy.index.names[level_index]
                    )
                    for level_index, level_value in enumerate(uniq_sorted_level)
                ]
                new_modin_frame = new_copy._modin_frame.from_labels()
                # Replace the levels that will remain as a part of the index.
                new_modin_frame.index = new_index
            else:
                new_modin_frame = self._modin_frame.from_labels()
            if isinstance(new_modin_frame.columns, pandas.MultiIndex):
                # Fix col_level and col_fill in generated column names because from_labels works with assumption
                # that col_level and col_fill are not specified but it expands tuples in level names.
                col_level = kwargs.get("col_level", 0)
                col_fill = kwargs.get("col_fill", "")
                if col_level != 0 or col_fill != "":
                    # Modify generated column names if col_level and col_fil have values different from default.
                    levels_names_list = [
                        f"level_{level_index}" if level_name is None else level_name
                        for level_index, level_name in enumerate(self.index.names)
                    ]
                    if col_fill is None:
                        # Initialize col_fill if it is None.
                        # This is some weird undocumented Pandas behavior to take first
                        # element of the last column name.
                        last_col_name = levels_names_list[uniq_sorted_level[-1]]
                        last_col_name = (
                            list(last_col_name)
                            if isinstance(last_col_name, tuple)
                            else [last_col_name]
                        )
                        if len(last_col_name) not in (1, self.columns.nlevels):
                            raise ValueError(
                                "col_fill=None is incompatible "
                                + f"with incomplete column name {last_col_name}"
                            )
                        col_fill = last_col_name[0]
                    columns_list = new_modin_frame.columns.tolist()
                    for level_index, level_value in enumerate(uniq_sorted_level):
                        level_name = levels_names_list[level_value]
                        # Expand tuples into separate items and fill the rest with col_fill
                        top_level = [col_fill] * col_level
                        middle_level = (
                            list(level_name)
                            if isinstance(level_name, tuple)
                            else [level_name]
                        )
                        bottom_level = [col_fill] * (
                            self.columns.nlevels - (col_level + len(middle_level))
                        )
                        item = tuple(top_level + middle_level + bottom_level)
                        if len(item) > self.columns.nlevels:
                            raise ValueError(
                                "Item must have length equal to number of levels."
                            )
                        columns_list[level_index] = item
                    new_modin_frame.columns = pandas.MultiIndex.from_tuples(
                        columns_list, names=self.columns.names
                    )
            new_self = self.__constructor__(new_modin_frame)
        else:
            new_self = self.copy()
            new_self.index = (
                # Cheaper to compute row lengths than index
                pandas.RangeIndex(sum(new_self._modin_frame.row_lengths))
                if new_index is None
                else new_index
            )
        return new_self
    def set_index_from_columns(
        self, keys: List[Hashable], drop: bool = True, append: bool = False
    ):
        """
        Create a new index built from the given data columns.

        Parameters
        ----------
        keys : list of hashable
            Column labels whose values become the new index.
        drop : bool, default: True
            Whether to remove the `keys` columns from the data.
        append : bool, default: False
            Whether to append the new levels to the existing index instead of
            replacing it.

        Returns
        -------
        PandasQueryCompiler
        """
        new_modin_frame = self._modin_frame.to_labels(keys)
        if append:
            arrays = []
            # Appending keeps the original order of the index levels, then appends the
            # new index objects.
            names = list(self.index.names)
            if isinstance(self.index, pandas.MultiIndex):
                for i in range(self.index.nlevels):
                    arrays.append(self.index._get_level_values(i))
            else:
                arrays.append(self.index)

            # Add the names in the correct order.
            names.extend(new_modin_frame.index.names)
            if isinstance(new_modin_frame.index, pandas.MultiIndex):
                for i in range(new_modin_frame.index.nlevels):
                    arrays.append(new_modin_frame.index._get_level_values(i))
            else:
                arrays.append(new_modin_frame.index)
            new_modin_frame.index = ensure_index_from_sequences(arrays, names)
        if not drop:
            # The algebraic operator for this operation always drops the column, but we
            # can copy the data in this object and just use the index from the result of
            # the query compiler call.
            result = self._modin_frame.copy()
            result.index = new_modin_frame.index
        else:
            result = new_modin_frame
        return self.__constructor__(result)
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs) -> PandasQueryCompiler:
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def is_series_like(self):
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# TreeReduce operations
    # Map phase counts per partition; reduce phase sums the partial counts.
    count = TreeReduce.register(pandas.DataFrame.count, pandas.DataFrame.sum)

    def _dtypes_sum(dtypes: pandas.Series, *func_args, **func_kwargs):  # noqa: GL08
        # The common type evaluation for `TreeReduce` operator may differ depending
        # on the pandas function, so it's better to pass an evaluation function that
        # should be defined for each Modin's function.
        return find_common_type(dtypes.tolist())

    sum = TreeReduce.register(pandas.DataFrame.sum, compute_dtypes=_dtypes_sum)
    prod = TreeReduce.register(pandas.DataFrame.prod)
    any = TreeReduce.register(pandas.DataFrame.any, pandas.DataFrame.any)
    all = TreeReduce.register(pandas.DataFrame.all, pandas.DataFrame.all)

    # memory_usage adds an extra column for index usage, but we don't want to distribute
    # the index memory usage calculation.
    _memory_usage_without_index = TreeReduce.register(
        pandas.DataFrame.memory_usage,
        lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
        axis=0,
    )
    def memory_usage(self, **kwargs):
        """
        Return the memory usage of each column in bytes.

        Per-column usage is computed distributively (without the index); when
        ``index=True`` the index usage is computed locally and prepended as an
        extra "Index" entry, matching pandas output.
        """
        index = kwargs.get("index", True)
        deep = kwargs.get("deep", False)
        usage_without_index = self._memory_usage_without_index(index=False, deep=deep)
        return (
            self.from_pandas(
                pandas.DataFrame(
                    [self.index.memory_usage()],
                    columns=["Index"],
                    index=[MODIN_UNNAMED_SERIES_LABEL],
                ),
                data_cls=type(self._modin_frame),
            ).concat(axis=1, other=[usage_without_index])
            if index
            else usage_without_index
        )
def max(self, axis, **kwargs):
def map_func(df, **kwargs):
return pandas.DataFrame.max(df, **kwargs)
def reduce_func(df, **kwargs):
if kwargs.get("numeric_only", False):
kwargs = kwargs.copy()
kwargs["numeric_only"] = False
return pandas.DataFrame.max(df, **kwargs)
return TreeReduce.register(map_func, reduce_func)(self, axis=axis, **kwargs)
def min(self, axis, **kwargs):
def map_func(df, **kwargs):
return pandas.DataFrame.min(df, **kwargs)
def reduce_func(df, **kwargs):
if kwargs.get("numeric_only", False):
kwargs = kwargs.copy()
kwargs["numeric_only"] = False
return pandas.DataFrame.min(df, **kwargs)
return TreeReduce.register(map_func, reduce_func)(self, axis=axis, **kwargs)
    def mean(self, axis, **kwargs):
        """
        Compute the mean along `axis` as a tree reduction of (sum, count) pairs.

        ``level`` and ``axis=None`` fall back to pandas.
        """
        if kwargs.get("level") is not None or axis is None:
            return self.default_to_pandas(pandas.DataFrame.mean, axis=axis, **kwargs)

        skipna = kwargs.get("skipna", True)

        # TODO-FIX: this function may work incorrectly with user-defined "numeric" values.
        # Since `count(numeric_only=True)` discards all unknown "numeric" types, we can get incorrect
        # divisor inside the reduce function.
        def map_fn(df, numeric_only=False, **kwargs):
            """
            Perform Map phase of the `mean`.

            Compute sum and number of elements in a given partition.
            """
            result = pandas.DataFrame(
                {
                    "sum": df.sum(axis=axis, skipna=skipna, numeric_only=numeric_only),
                    "count": df.count(axis=axis, numeric_only=numeric_only),
                }
            )
            return result if axis else result.T

        def reduce_fn(df, **kwargs):
            """
            Perform Reduce phase of the `mean`.

            Compute sum for all the partitions and divide it by
            the total number of elements.
            """
            sum_cols = df["sum"] if axis else df.loc["sum"]
            count_cols = df["count"] if axis else df.loc["count"]

            if not isinstance(sum_cols, pandas.Series):
                # If we got `NaN` as the result of the sum in any axis partition,
                # then we must consider the whole sum as `NaN`, so setting `skipna=False`
                sum_cols = sum_cols.sum(axis=axis, skipna=False)
                count_cols = count_cols.sum(axis=axis, skipna=False)
            return sum_cols / count_cols

        def compute_dtypes_fn(dtypes, axis, **kwargs):
            """
            Compute the resulting Series dtype.

            When computing along rows and there are numeric and boolean columns
            Pandas returns `object`. In all other cases - `float64`.
            """
            if (
                axis == 1
                and any(is_bool_dtype(t) for t in dtypes)
                and any(is_numeric_dtype(t) for t in dtypes)
            ):
                return "object"
            return "float64"

        return TreeReduce.register(
            map_fn,
            reduce_fn,
            compute_dtypes=compute_dtypes_fn,
        )(self, axis=axis, **kwargs)
# END TreeReduce operations
# Reduce operations
idxmax = Reduce.register(pandas.DataFrame.idxmax)
idxmin = Reduce.register(pandas.DataFrame.idxmin)
def median(self, axis, **kwargs):
if axis is None:
return self.default_to_pandas(pandas.DataFrame.median, axis=axis, **kwargs)
return Reduce.register(pandas.DataFrame.median)(self, axis=axis, **kwargs)
def nunique(self, axis=0, dropna=True):
if not RangePartitioning.get():
return Reduce.register(pandas.DataFrame.nunique)(
self, axis=axis, dropna=dropna
)
unsupported_message = ""
if axis != 0:
unsupported_message += (
"Range-partitioning 'nunique()' is only supported for 'axis=0'.\n"
)
if len(self.columns) > 1:
unsupported_message += "Range-partitioning 'nunique()' is only supported for a signle-column dataframe.\n"
if len(unsupported_message) > 0:
message = (
f"Can't use range-partitioning implementation for 'nunique' because:\n{unsupported_message}"
+ "Falling back to a full-axis reduce implementation."
)
get_logger().info(message)
ErrorMessage.warn(message)
return Reduce.register(pandas.DataFrame.nunique)(
self, axis=axis, dropna=dropna
)
# compute '.nunique()' for each row partitions
new_modin_frame = self._modin_frame._apply_func_to_range_partitioning(
key_columns=self.columns.tolist(),
func=lambda df: df.nunique(dropna=dropna).to_frame(),
)
# sum the results of each row part to get the final value
new_modin_frame = new_modin_frame.reduce(axis=0, function=lambda df: df.sum())
return self.__constructor__(new_modin_frame, shape_hint="column")
def skew(self, axis, **kwargs):
if axis is None:
return self.default_to_pandas(pandas.DataFrame.skew, axis=axis, **kwargs)
return Reduce.register(pandas.DataFrame.skew)(self, axis=axis, **kwargs)
def kurt(self, axis, **kwargs):
if axis is None:
return self.default_to_pandas(pandas.DataFrame.kurt, axis=axis, **kwargs)
return Reduce.register(pandas.DataFrame.kurt)(self, axis=axis, **kwargs)
sem = Reduce.register(pandas.DataFrame.sem)
std = Reduce.register(pandas.DataFrame.std)
var = Reduce.register(pandas.DataFrame.var)
sum_min_count = Reduce.register(pandas.DataFrame.sum)
prod_min_count = Reduce.register(pandas.DataFrame.prod)
quantile_for_single_value = Reduce.register(pandas.DataFrame.quantile)
def to_datetime(self, *args, **kwargs):
if len(self.columns) == 1:
return Map.register(
# to_datetime has inplace side effects, see GH#3063
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
).to_frame(),
shape_hint="column",
)(self, *args, **kwargs)
else:
return Reduce.register(pandas.to_datetime, axis=1, shape_hint="column")(
self, *args, **kwargs
)
# END Reduce operations
    def _resample_func(
        self,
        resample_kwargs,
        func_name,
        new_columns=None,
        df_op=None,
        allow_range_impl=True,
        *args,
        **kwargs,
    ):
        """
        Resample underlying time-series data and apply aggregation on it.

        Parameters
        ----------
        resample_kwargs : dict
            Resample parameters in the format of ``modin.pandas.DataFrame.resample`` signature.
        func_name : str
            Aggregation function name to apply on resampler object.
        new_columns : list of labels, optional
            Actual column labels of the resulting frame, supposed to be a hint for the
            Modin frame. If not specified will be computed automatically.
        df_op : callable(pandas.DataFrame) -> [pandas.DataFrame, pandas.Series], optional
            Preprocessor function to apply to the passed frame before resampling.
        allow_range_impl : bool, default: True
            Whether to use range-partitioning if ``RangePartitioning.get() is True``.
        *args : args
            Arguments to pass to the aggregation function.
        **kwargs : kwargs
            Arguments to pass to the aggregation function.

        Returns
        -------
        PandasQueryCompiler
            New QueryCompiler containing the result of resample aggregation.
        """
        from modin.core.dataframe.pandas.dataframe.utils import ShuffleResample

        def map_func(df, resample_kwargs=resample_kwargs):  # pragma: no cover
            """Resample time-series data of the passed frame and apply aggregation function on it."""
            if len(df) == 0:
                if resample_kwargs["on"] is not None:
                    df = df.set_index(resample_kwargs["on"])
                return df
            if "bin_bounds" in df.attrs:
                # Pad the partition with NaN rows at the bin bounds so every
                # partition produces the same set of resampling bins.
                timestamps = df.attrs["bin_bounds"]
                if isinstance(df.index, pandas.MultiIndex):
                    level_to_keep = resample_kwargs["level"]
                    if isinstance(level_to_keep, int):
                        to_drop = [
                            lvl
                            for lvl in range(df.index.nlevels)
                            if lvl != level_to_keep
                        ]
                    else:
                        to_drop = [
                            lvl for lvl in df.index.names if lvl != level_to_keep
                        ]
                    df.index = df.index.droplevel(to_drop)
                    resample_kwargs = resample_kwargs.copy()
                    resample_kwargs["level"] = None
                filler = pandas.DataFrame(
                    np.nan, index=pandas.Index(timestamps), columns=df.columns
                )
                df = pandas.concat([df, filler], copy=False)
            if df_op is not None:
                df = df_op(df)
            resampled_val = df.resample(**resample_kwargs)
            op = getattr(pandas.core.resample.Resampler, func_name)
            if callable(op):
                try:
                    # This will happen with Arrow buffer read-only errors. We don't want to copy
                    # all the time, so this will try to fast-path the code first.
                    val = op(resampled_val, *args, **kwargs)
                except ValueError:
                    resampled_val = df.copy().resample(**resample_kwargs)
                    val = op(resampled_val, *args, **kwargs)
            else:
                val = getattr(resampled_val, func_name)

            if isinstance(val, pandas.Series):
                return val.to_frame()
            else:
                return val

        if resample_kwargs["on"] is None:
            level = [
                0 if resample_kwargs["level"] is None else resample_kwargs["level"]
            ]
            key_columns = []
        else:
            level = None
            key_columns = [resample_kwargs["on"]]

        if (
            not allow_range_impl
            or resample_kwargs["axis"] not in (0, "index")
            or not RangePartitioning.get()
        ):
            # Full-axis fallback: each column partition resamples the whole axis.
            new_modin_frame = self._modin_frame.apply_full_axis(
                axis=0, func=map_func, new_columns=new_columns
            )
        else:
            new_modin_frame = self._modin_frame._apply_func_to_range_partitioning(
                key_columns=key_columns,
                level=level,
                func=map_func,
                shuffle_func_cls=ShuffleResample,
                resample_kwargs=resample_kwargs,
            )
        return self.__constructor__(new_modin_frame)
    # The `resample_*` methods below are thin wrappers around `_resample_func`;
    # each forwards its arguments to the corresponding
    # `pandas.core.resample.Resampler` aggregation. `allow_range_impl=False`
    # marks aggregations that are position/order-sensitive and therefore
    # cannot use the range-partitioning implementation.
    def resample_get_group(self, resample_kwargs, name, obj):
        return self._resample_func(
            resample_kwargs, "get_group", name=name, allow_range_impl=False, obj=obj
        )

    def resample_app_ser(self, resample_kwargs, func, *args, **kwargs):
        # `df_op` squeezes to a Series so `apply` gets Series semantics.
        return self._resample_func(
            resample_kwargs,
            "apply",
            df_op=lambda df: df.squeeze(axis=1),
            func=func,
            *args,
            **kwargs,
        )

    def resample_app_df(self, resample_kwargs, func, *args, **kwargs):
        return self._resample_func(resample_kwargs, "apply", func=func, *args, **kwargs)

    def resample_agg_ser(self, resample_kwargs, func, *args, **kwargs):
        # `df_op` squeezes to a Series so `aggregate` gets Series semantics.
        return self._resample_func(
            resample_kwargs,
            "aggregate",
            df_op=lambda df: df.squeeze(axis=1),
            func=func,
            *args,
            **kwargs,
        )

    def resample_agg_df(self, resample_kwargs, func, *args, **kwargs):
        return self._resample_func(
            resample_kwargs, "aggregate", func=func, *args, **kwargs
        )

    def resample_transform(self, resample_kwargs, arg, *args, **kwargs):
        return self._resample_func(
            resample_kwargs,
            "transform",
            arg=arg,
            allow_range_impl=False,
            *args,
            **kwargs,
        )

    def resample_pipe(self, resample_kwargs, func, *args, **kwargs):
        return self._resample_func(resample_kwargs, "pipe", func=func, *args, **kwargs)

    def resample_ffill(self, resample_kwargs, limit):
        return self._resample_func(
            resample_kwargs, "ffill", limit=limit, allow_range_impl=False
        )

    def resample_bfill(self, resample_kwargs, limit):
        return self._resample_func(
            resample_kwargs, "bfill", limit=limit, allow_range_impl=False
        )

    def resample_nearest(self, resample_kwargs, limit):
        return self._resample_func(
            resample_kwargs, "nearest", limit=limit, allow_range_impl=False
        )

    def resample_fillna(self, resample_kwargs, method, limit):
        return self._resample_func(
            resample_kwargs,
            "fillna",
            method=method,
            limit=limit,
            allow_range_impl=method is None,
        )

    def resample_asfreq(self, resample_kwargs, fill_value):
        return self._resample_func(resample_kwargs, "asfreq", fill_value=fill_value)

    def resample_interpolate(
        self,
        resample_kwargs,
        method,
        axis,
        limit,
        inplace,
        limit_direction,
        limit_area,
        downcast,
        **kwargs,
    ):
        return self._resample_func(
            resample_kwargs,
            "interpolate",
            axis=axis,
            limit=limit,
            inplace=inplace,
            limit_direction=limit_direction,
            limit_area=limit_area,
            downcast=downcast,
            allow_range_impl=False,
            **kwargs,
        )

    def resample_count(self, resample_kwargs):
        return self._resample_func(resample_kwargs, "count")

    def resample_nunique(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "nunique", *args, **kwargs)

    def resample_first(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(
            resample_kwargs, "first", allow_range_impl=False, *args, **kwargs
        )

    def resample_last(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(
            resample_kwargs, "last", allow_range_impl=False, *args, **kwargs
        )

    def resample_max(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "max", *args, **kwargs)

    def resample_mean(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "mean", *args, **kwargs)

    def resample_median(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "median", *args, **kwargs)

    def resample_min(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "min", *args, **kwargs)

    def resample_ohlc_ser(self, resample_kwargs, *args, **kwargs):
        # `df_op` squeezes to a Series so `ohlc` gets Series semantics.
        return self._resample_func(
            resample_kwargs,
            "ohlc",
            df_op=lambda df: df.squeeze(axis=1),
            *args,
            **kwargs,
        )

    def resample_ohlc_df(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "ohlc", *args, **kwargs)

    def resample_prod(self, resample_kwargs, min_count, *args, **kwargs):
        return self._resample_func(
            resample_kwargs,
            "prod",
            min_count=min_count,
            *args,
            **kwargs,
        )

    def resample_size(self, resample_kwargs):
        return self._resample_func(
            resample_kwargs,
            "size",
            new_columns=[MODIN_UNNAMED_SERIES_LABEL],
            allow_range_impl=False,
        )

    def resample_sem(self, resample_kwargs, *args, **kwargs):
        return self._resample_func(resample_kwargs, "sem", *args, **kwargs)

    def resample_std(self, resample_kwargs, ddof, *args, **kwargs):
        return self._resample_func(resample_kwargs, "std", ddof=ddof, *args, **kwargs)

    def resample_sum(self, resample_kwargs, min_count, *args, **kwargs):
        return self._resample_func(
            resample_kwargs,
            "sum",
            min_count=min_count,
            *args,
            **kwargs,
        )

    def resample_var(self, resample_kwargs, ddof, *args, **kwargs):
        return self._resample_func(resample_kwargs, "var", ddof=ddof, *args, **kwargs)

    def resample_quantile(self, resample_kwargs, q, **kwargs):
        return self._resample_func(resample_kwargs, "quantile", q=q, **kwargs)
    def expanding_aggregate(self, axis, expanding_args, func, *args, **kwargs):
        """
        Apply an aggregation `func` over an expanding window along `axis`.

        This is a full-axis operation (aggregation can change the shape);
        the original row labels are preserved.
        """
        new_modin_frame = self._modin_frame.apply_full_axis(
            axis,
            lambda df: pandas.DataFrame(
                df.expanding(*expanding_args).aggregate(func=func, *args, **kwargs)
            ),
            new_index=self.index,
        )
        return self.__constructor__(new_modin_frame)
expanding_sum = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).sum(*args, **kwargs)
),
shape_preserved=True,
)
expanding_min = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).min(*args, **kwargs)
),
shape_preserved=True,
)
expanding_max = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).max(*args, **kwargs)
),
shape_preserved=True,
)
expanding_mean = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).mean(*args, **kwargs)
),
shape_preserved=True,
)
expanding_median = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).median(*args, **kwargs)
),
shape_preserved=True,
)
expanding_var = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).var(*args, **kwargs)
),
shape_preserved=True,
)
expanding_std = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).std(*args, **kwargs)
),
shape_preserved=True,
)
expanding_count = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).count(*args, **kwargs)
),
shape_preserved=True,
)
    def expanding_cov(
        self,
        fold_axis,
        expanding_args,
        squeeze_self,
        squeeze_other,
        other=None,
        pairwise=None,
        ddof=1,
        numeric_only=False,
        **kwargs,
    ):
        """
        Compute the expanding-window covariance.

        Multi-column frames need all columns at once and default to pandas;
        single-column frames use a shape-preserving Fold. `other` is
        materialized to pandas up front (squeezed to a Series when
        `squeeze_other` is set).
        """
        other_for_pandas = (
            other
            if other is None
            else (
                other.to_pandas().squeeze(axis=1)
                if squeeze_other
                else other.to_pandas()
            )
        )
        if len(self.columns) > 1:
            # computing covariance for each column requires having the other columns,
            # so we can't parallelize this as a full-column operation
            return self.default_to_pandas(
                lambda df: pandas.DataFrame.expanding(df, *expanding_args).cov(
                    other=other_for_pandas,
                    pairwise=pairwise,
                    ddof=ddof,
                    numeric_only=numeric_only,
                    **kwargs,
                )
            )
        return Fold.register(
            lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
                (df.squeeze(axis=1) if squeeze_self else df)
                .expanding(*expanding_args)
                .cov(*args, **kwargs)
            ),
            shape_preserved=True,
        )(
            self,
            fold_axis,
            expanding_args,
            other=other_for_pandas,
            pairwise=pairwise,
            ddof=ddof,
            numeric_only=numeric_only,
            **kwargs,
        )
    def expanding_corr(
        self,
        fold_axis,
        expanding_args,
        squeeze_self,
        squeeze_other,
        other=None,
        pairwise=None,
        ddof=1,
        numeric_only=False,
        **kwargs,
    ):
        """
        Compute the expanding-window correlation.

        Multi-column frames need all columns at once and default to pandas;
        single-column frames use a shape-preserving Fold. `other` is
        materialized to pandas up front (squeezed to a Series when
        `squeeze_other` is set).
        """
        other_for_pandas = (
            other
            if other is None
            else (
                other.to_pandas().squeeze(axis=1)
                if squeeze_other
                else other.to_pandas()
            )
        )
        if len(self.columns) > 1:
            # computing correlation for each column requires having the other columns,
            # so we can't parallelize this as a full-column operation
            return self.default_to_pandas(
                lambda df: pandas.DataFrame.expanding(df, *expanding_args).corr(
                    other=other_for_pandas,
                    pairwise=pairwise,
                    ddof=ddof,
                    numeric_only=numeric_only,
                    **kwargs,
                )
            )
        return Fold.register(
            lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
                (df.squeeze(axis=1) if squeeze_self else df)
                .expanding(*expanding_args)
                .corr(*args, **kwargs)
            ),
            shape_preserved=True,
        )(
            self,
            fold_axis,
            expanding_args,
            other=other_for_pandas,
            pairwise=pairwise,
            ddof=ddof,
            numeric_only=numeric_only,
            **kwargs,
        )
expanding_quantile = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).quantile(*args, **kwargs)
),
shape_preserved=True,
)
expanding_sem = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).sem(*args, **kwargs)
),
shape_preserved=True,
)
expanding_kurt = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).kurt(*args, **kwargs)
),
shape_preserved=True,
)
expanding_skew = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).skew(*args, **kwargs)
),
shape_preserved=True,
)
expanding_rank = Fold.register(
lambda df, expanding_args, *args, **kwargs: pandas.DataFrame(
df.expanding(*expanding_args).rank(*args, **kwargs)
),
shape_preserved=True,
)
window_mean = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).mean(*args, **kwargs)
),
shape_preserved=True,
)
window_sum = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).sum(*args, **kwargs)
),
shape_preserved=True,
)
window_var = Fold.register(
lambda df, rolling_kwargs, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).var(ddof=ddof, *args, **kwargs)
),
shape_preserved=True,
)
window_std = Fold.register(
lambda df, rolling_kwargs, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).std(ddof=ddof, *args, **kwargs)
),
shape_preserved=True,
)
rolling_count = Fold.register(
lambda df, rolling_kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).count()
),
shape_preserved=True,
)
rolling_sum = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).sum(*args, **kwargs)
),
shape_preserved=True,
)
rolling_sem = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).sem(*args, **kwargs)
),
shape_preserved=True,
)
rolling_mean = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).mean(*args, **kwargs)
),
shape_preserved=True,
)
rolling_median = Fold.register(
lambda df, rolling_kwargs, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).median(**kwargs)
),
shape_preserved=True,
)
rolling_var = Fold.register(
lambda df, rolling_kwargs, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).var(ddof=ddof, *args, **kwargs)
),
shape_preserved=True,
)
rolling_std = Fold.register(
lambda df, rolling_kwargs, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).std(ddof=ddof, *args, **kwargs)
),
shape_preserved=True,
)
rolling_min = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).min(*args, **kwargs)
),
shape_preserved=True,
)
rolling_max = Fold.register(
lambda df, rolling_kwargs, *args, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).max(*args, **kwargs)
),
shape_preserved=True,
)
rolling_skew = Fold.register(
lambda df, rolling_kwargs, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).skew(**kwargs)
),
shape_preserved=True,
)
rolling_kurt = Fold.register(
lambda df, rolling_kwargs, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).kurt(**kwargs)
),
shape_preserved=True,
)
rolling_apply = Fold.register(
lambda df, rolling_kwargs, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
),
),
shape_preserved=True,
)
rolling_quantile = Fold.register(
lambda df, rolling_kwargs, q, interpolation, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).quantile(
q=q, interpolation=interpolation, **kwargs
),
),
shape_preserved=True,
)
rolling_rank = Fold.register(
lambda df, rolling_kwargs, method, ascending, pct, numeric_only, **kwargs: pandas.DataFrame(
df.rolling(**rolling_kwargs).rank(
method=method,
ascending=ascending,
pct=pct,
numeric_only=numeric_only,
**kwargs,
),
),
shape_preserved=True,
)
    def rolling_corr(self, axis, rolling_kwargs, other, pairwise, *args, **kwargs):
        """
        Compute the rolling-window correlation.

        Multi-column frames need all columns at once and default to pandas;
        single-column frames use a shape-preserving Fold.
        """
        if len(self.columns) > 1:
            return self.default_to_pandas(
                lambda df: pandas.DataFrame.rolling(df, **rolling_kwargs).corr(
                    other=other, pairwise=pairwise, *args, **kwargs
                )
            )
        else:
            return Fold.register(
                lambda df: pandas.DataFrame(
                    df.rolling(**rolling_kwargs).corr(
                        other=other, pairwise=pairwise, *args, **kwargs
                    )
                ),
                shape_preserved=True,
            )(self, axis)
def rolling_cov(self, axis, rolling_kwargs, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, **rolling_kwargs).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return Fold.register(
lambda df: pandas.DataFrame(
df.rolling(**rolling_kwargs).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
),
shape_preserved=True,
)(self, axis)
def rolling_aggregate(self, axis, rolling_kwargs, func, *args, **kwargs):
new_modin_frame = self._modin_frame.apply_full_axis(
axis,
lambda df: pandas.DataFrame(
df.rolling(**rolling_kwargs).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
    def unstack(self, level, fill_value):
        """
        Pivot the given index level(s) into the column axis.

        When the whole (non-Multi or fully-listed) index is unstacked the
        result collapses to a Series-like single column, so the kernel runs
        along axis 1 and the final index must be rebuilt from the original
        labels (`need_reindex`). Otherwise the kernel runs along axis 0 and
        pandas' own output labeling is kept.
        """
        if not isinstance(self.index, pandas.MultiIndex) or (
            isinstance(self.index, pandas.MultiIndex)
            and is_list_like(level)
            and len(level) == self.index.nlevels
        ):
            axis = 1
            new_columns = [MODIN_UNNAMED_SERIES_LABEL]
            need_reindex = True
        else:
            axis = 0
            new_columns = None
            need_reindex = False

        def map_func(df):  # pragma: no cover
            # Per-partition kernel: plain pandas unstack, re-wrapped as a frame.
            return pandas.DataFrame(df.unstack(level=level, fill_value=fill_value))

        def is_tree_like_or_1d(calc_index, valid_index):
            """
            Check whether specified index is a single dimensional or built in a tree manner.

            Parameters
            ----------
            calc_index : pandas.Index
                Frame index to check.
            valid_index : pandas.Index
                Frame index on the opposite from `calc_index` axis.

            Returns
            -------
            bool
                True if `calc_index` is not MultiIndex or MultiIndex and built in a tree manner.
                False otherwise.
            """
            if not isinstance(calc_index, pandas.MultiIndex):
                return True
            actual_len = 1
            for lvl in calc_index.levels:
                actual_len *= len(lvl)
            return len(self.index) * len(self.columns) == actual_len * len(valid_index)

        is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
        is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)

        is_all_multi_list = False
        if (
            isinstance(self.index, pandas.MultiIndex)
            and isinstance(self.columns, pandas.MultiIndex)
            and is_list_like(level)
            and len(level) == self.index.nlevels
            and is_tree_like_or_1d_index
            and is_tree_like_or_1d_cols
        ):
            # Both axes are tree-like MultiIndexes and all index levels get
            # unstacked: temporarily number the columns so the kernel output
            # sorts predictably, then restore the real labels afterwards.
            is_all_multi_list = True
            real_cols_bkp = self.columns
            obj = self.copy()
            obj.columns = np.arange(len(obj.columns))
        else:
            obj = self

        new_modin_frame = obj._modin_frame.apply_full_axis(
            axis, map_func, new_columns=new_columns
        )
        result = self.__constructor__(new_modin_frame)

        def compute_index(index, columns, consider_index=True, consider_columns=True):
            """
            Compute new index for the unstacked frame.

            Parameters
            ----------
            index : pandas.Index
                Index of the original frame.
            columns : pandas.Index
                Columns of the original frame.
            consider_index : bool, default: True
                Whether original index contains duplicated values.
                If True all duplicates will be dropped.
            consider_columns : bool, default: True
                Whether original columns contains duplicated values.
                If True all duplicates will be dropped.

            Returns
            -------
            pandas.Index
                New index to use in the unstacked frame.
            """

            def get_unique_level_values(index):
                return [
                    index.get_level_values(lvl).unique()
                    for lvl in np.arange(index.nlevels)
                ]

            new_index = (
                get_unique_level_values(index)
                if consider_index
                else index if isinstance(index, list) else [index]
            )

            new_columns = (
                get_unique_level_values(columns) if consider_columns else [columns]
            )
            return pandas.MultiIndex.from_product([*new_columns, *new_index])

        if is_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
            result = result.sort_index()
            index_level_values = [lvl for lvl in obj.index.levels]
            result.index = compute_index(
                index_level_values, real_cols_bkp, consider_index=False
            )
            return result

        if need_reindex:
            if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
                # Tree-like labels: the result index is the cartesian product
                # of (unique) column values and (unique) index values.
                is_recompute_index = isinstance(self.index, pandas.MultiIndex)
                is_recompute_columns = not is_recompute_index and isinstance(
                    self.columns, pandas.MultiIndex
                )
                new_index = compute_index(
                    self.index, self.columns, is_recompute_index, is_recompute_columns
                )
            elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
                # Mixed case: only reconstructible when the index is the
                # non-tree MultiIndex; otherwise keep pandas' labeling.
                if isinstance(self.columns, pandas.MultiIndex) or not isinstance(
                    self.index, pandas.MultiIndex
                ):
                    return result
                else:
                    index = (
                        self.index.sortlevel()[0]
                        if is_tree_like_or_1d_index
                        and not is_tree_like_or_1d_cols
                        and isinstance(self.index, pandas.MultiIndex)
                        else self.index
                    )
                    index = pandas.MultiIndex.from_tuples(
                        list(index) * len(self.columns)
                    )
                    columns = self.columns.repeat(len(self.index))
                    index_levels = [
                        index.get_level_values(i) for i in range(index.nlevels)
                    ]
                    new_index = pandas.MultiIndex.from_arrays(
                        [columns] + index_levels,
                        names=self.columns.names + self.index.names,
                    )
            else:
                return result
            result = result.reindex(0, new_index)
        return result
def stack(self, level, dropna, sort):
if not isinstance(self.columns, pandas.MultiIndex) or (
isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.columns.nlevels
):
new_columns = [MODIN_UNNAMED_SERIES_LABEL]
else:
new_columns = None
new_modin_frame = self._modin_frame.apply_full_axis(
1,
lambda df: pandas.DataFrame(
df.stack(level=level, dropna=dropna, sort=sort)
),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that apply a function to every partition.
    def isin(self, values, ignore_indices=False):
        """
        Check element-wise whether each value is contained in `values`.

        Parameters
        ----------
        values : list-like, dict, or query compiler
            Values to test membership against.
        ignore_indices : bool, default: False
            If `values` is a query compiler, compare by value only (squeeze it
            to a 1D object so pandas ignores index alignment).

        Returns
        -------
        PandasQueryCompiler
            Boolean mask of the same shape as `self`.
        """
        shape_hint = self._shape_hint
        if isinstance(values, type(self)):
            # HACK: if we don't cast to pandas, then the execution engine will try to
            # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `values` as a query compiler
            # and broadcast accordingly.
            values = values.to_pandas()
            if ignore_indices:
                # Pandas logic is that it ignores indexing if 'values' is a 1D object
                values = values.squeeze(axis=1)

        def isin_func(df, values):
            # Series-shaped inputs must be squeezed so pandas' Series.isin
            # semantics apply; the result is re-wrapped as a one-column frame.
            if shape_hint == "column":
                df = df.squeeze(axis=1)
            res = df.isin(values)
            if isinstance(res, pandas.Series):
                res = res.to_frame(
                    MODIN_UNNAMED_SERIES_LABEL if res.name is None else res.name
                )
            return res

        return Map.register(isin_func, shape_hint=shape_hint, dtypes=np.bool_)(
            self, values
        )
abs = Map.register(pandas.DataFrame.abs, dtypes="copy")
map = Map.register(pandas.DataFrame.map)
conj = Map.register(lambda df, *args, **kwargs: pandas.DataFrame(np.conj(df)))
    def convert_dtypes(
        self,
        infer_objects: bool = True,
        convert_string: bool = True,
        convert_integer: bool = True,
        convert_boolean: bool = True,
        convert_floating: bool = True,
        dtype_backend: str = "numpy_nullable",
    ):
        """
        Convert columns to the best possible dtypes supporting ``pd.NA``.

        Runs as a Fold along the axis so the dtype decision sees the whole
        column; parameters mirror ``pandas.DataFrame.convert_dtypes``.
        """
        result = Fold.register(pandas.DataFrame.convert_dtypes, shape_preserved=True)(
            self,
            infer_objects=infer_objects,
            convert_string=convert_string,
            convert_integer=convert_integer,
            convert_boolean=convert_boolean,
            convert_floating=convert_floating,
            dtype_backend=dtype_backend,
        )
        # TODO: `numpy_nullable` should be handled similar
        if dtype_backend == "pyarrow":
            # Record the backend switch on the frame so later ops can adapt.
            result._modin_frame._pandas_backend = "pyarrow"
        return result
invert = Map.register(pandas.DataFrame.__invert__, dtypes="copy")
isna = Map.register(pandas.DataFrame.isna, dtypes=np.bool_)
# TODO: better way to distinguish methods for NumPy API?
_isfinite = Map.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.isfinite(df, *args, **kwargs)),
dtypes=np.bool_,
)
_isinf = Map.register( # Needed for numpy API
lambda df, *args, **kwargs: pandas.DataFrame(np.isinf(df, *args, **kwargs)),
dtypes=np.bool_,
)
_isnat = Map.register( # Needed for numpy API
lambda df, *args, **kwargs: pandas.DataFrame(np.isnat(df, *args, **kwargs)),
dtypes=np.bool_,
)
_isneginf = Map.register( # Needed for numpy API
lambda df, *args, **kwargs: pandas.DataFrame(np.isneginf(df, *args, **kwargs)),
dtypes=np.bool_,
)
_isposinf = Map.register( # Needed for numpy API
lambda df, *args, **kwargs: pandas.DataFrame(np.isposinf(df, *args, **kwargs)),
dtypes=np.bool_,
)
_iscomplex = Map.register( # Needed for numpy API
lambda df, *args, **kwargs: pandas.DataFrame(np.iscomplex(df, *args, **kwargs)),
dtypes=np.bool_,
)
_isreal = Map.register( # Needed for numpy API
lambda df, *args, **kwargs: pandas.DataFrame(np.isreal(df, *args, **kwargs)),
dtypes=np.bool_,
)
_logical_not = Map.register(np.logical_not, dtypes=np.bool_) # Needed for numpy API
_tanh = Map.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.tanh(df, *args, **kwargs))
) # Needed for numpy API
_sqrt = Map.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.sqrt(df, *args, **kwargs))
) # Needed for numpy API
_exp = Map.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.exp(df, *args, **kwargs))
) # Needed for numpy API
negative = Map.register(pandas.DataFrame.__neg__)
notna = Map.register(pandas.DataFrame.notna, dtypes=np.bool_)
round = Map.register(pandas.DataFrame.round)
replace = Map.register(pandas.DataFrame.replace)
series_view = Map.register(
lambda df, *args, **kwargs: pandas.DataFrame(
df.squeeze(axis=1).view(*args, **kwargs)
)
)
to_numeric = Map.register(
lambda df, *args, **kwargs: pandas.DataFrame(
pandas.to_numeric(df.squeeze(axis=1), *args, **kwargs)
)
)
to_timedelta = Map.register(
lambda s, *args, **kwargs: pandas.to_timedelta(
s.squeeze(axis=1), *args, **kwargs
).to_frame(),
dtypes="timedelta64[ns]",
)
# END Map partitions operations
# String map partitions operations
str_capitalize = Map.register(_str_map("capitalize"), dtypes="copy")
str_center = Map.register(_str_map("center"), dtypes="copy")
str_contains = Map.register(_str_map("contains"), dtypes=np.bool_)
str_count = Map.register(_str_map("count"), dtypes=int)
str_endswith = Map.register(_str_map("endswith"), dtypes=np.bool_)
str_find = Map.register(_str_map("find"), dtypes=np.int64)
str_findall = Map.register(_str_map("findall"), dtypes="copy")
str_get = Map.register(_str_map("get"), dtypes="copy")
str_index = Map.register(_str_map("index"), dtypes=np.int64)
str_isalnum = Map.register(_str_map("isalnum"), dtypes=np.bool_)
str_isalpha = Map.register(_str_map("isalpha"), dtypes=np.bool_)
str_isdecimal = Map.register(_str_map("isdecimal"), dtypes=np.bool_)
str_isdigit = Map.register(_str_map("isdigit"), dtypes=np.bool_)
str_islower = Map.register(_str_map("islower"), dtypes=np.bool_)
str_isnumeric = Map.register(_str_map("isnumeric"), dtypes=np.bool_)
str_isspace = Map.register(_str_map("isspace"), dtypes=np.bool_)
str_istitle = Map.register(_str_map("istitle"), dtypes=np.bool_)
str_isupper = Map.register(_str_map("isupper"), dtypes=np.bool_)
str_join = Map.register(_str_map("join"), dtypes="copy")
str_len = Map.register(_str_map("len"), dtypes=int)
str_ljust = Map.register(_str_map("ljust"), dtypes="copy")
str_lower = Map.register(_str_map("lower"), dtypes="copy")
str_lstrip = Map.register(_str_map("lstrip"), dtypes="copy")
str_match = Map.register(_str_map("match"), dtypes="copy")
str_normalize = Map.register(_str_map("normalize"), dtypes="copy")
str_pad = Map.register(_str_map("pad"), dtypes="copy")
_str_partition = Map.register(_str_map("partition"), dtypes="copy")
def str_partition(self, sep=" ", expand=True):
# For `expand`, need an operator that can create more columns than before
if expand:
return super().str_partition(sep=sep, expand=expand)
return self._str_partition(sep=sep, expand=False)
str_repeat = Map.register(_str_map("repeat"), dtypes="copy")
_str_extract = Map.register(_str_map("extract"), dtypes="copy")
def str_extract(self, pat, flags, expand):
regex = re.compile(pat, flags=flags)
# need an operator that can create more columns than before
if expand and regex.groups == 1:
qc = self._str_extract(pat, flags=flags, expand=expand)
qc.columns = get_group_names(regex)
else:
qc = super().str_extract(pat, flags=flags, expand=expand)
return qc
str_replace = Map.register(_str_map("replace"), dtypes="copy", shape_hint="column")
str_rfind = Map.register(_str_map("rfind"), dtypes=np.int64, shape_hint="column")
str_rindex = Map.register(_str_map("rindex"), dtypes=np.int64, shape_hint="column")
str_rjust = Map.register(_str_map("rjust"), dtypes="copy", shape_hint="column")
_str_rpartition = Map.register(
_str_map("rpartition"), dtypes="copy", shape_hint="column"
)
def str_rpartition(self, sep=" ", expand=True):
if expand:
# For `expand`, need an operator that can create more columns than before
return super().str_rpartition(sep=sep, expand=expand)
return self._str_rpartition(sep=sep, expand=False)
_str_rsplit = Map.register(_str_map("rsplit"), dtypes="copy", shape_hint="column")
def str_rsplit(self, pat=None, n=-1, expand=False):
if expand:
# For `expand`, need an operator that can create more columns than before
return super().str_rsplit(pat=pat, n=n, expand=expand)
return self._str_rsplit(pat=pat, n=n, expand=False)
str_rstrip = Map.register(_str_map("rstrip"), dtypes="copy", shape_hint="column")
str_slice = Map.register(_str_map("slice"), dtypes="copy", shape_hint="column")
str_slice_replace = Map.register(
_str_map("slice_replace"), dtypes="copy", shape_hint="column"
)
_str_split = Map.register(_str_map("split"), dtypes="copy", shape_hint="column")
def str_split(self, pat=None, n=-1, expand=False, regex=None):
if expand:
# For `expand`, need an operator that can create more columns than before
return super().str_split(pat=pat, n=n, expand=expand, regex=regex)
return self._str_split(pat=pat, n=n, expand=False, regex=regex)
str_startswith = Map.register(
_str_map("startswith"), dtypes=np.bool_, shape_hint="column"
)
str_strip = Map.register(_str_map("strip"), dtypes="copy", shape_hint="column")
str_swapcase = Map.register(
_str_map("swapcase"), dtypes="copy", shape_hint="column"
)
str_title = Map.register(_str_map("title"), dtypes="copy", shape_hint="column")
str_translate = Map.register(
_str_map("translate"), dtypes="copy", shape_hint="column"
)
str_upper = Map.register(_str_map("upper"), dtypes="copy", shape_hint="column")
str_wrap = Map.register(_str_map("wrap"), dtypes="copy", shape_hint="column")
str_zfill = Map.register(_str_map("zfill"), dtypes="copy", shape_hint="column")
str___getitem__ = Map.register(
_str_map("__getitem__"), dtypes="copy", shape_hint="column"
)
# END String map partitions operations
    def unique(self, keep="first", ignore_index=True, subset=None):
        """
        Drop duplicate rows, optionally considering only `subset` columns.

        Single-column frames with default options use the faster
        ``Series.unique`` kernel; under range partitioning the work is
        distributed by key columns, otherwise a full-axis kernel runs.
        """
        # kernels with 'pandas.Series.unique()' work faster
        can_use_unique_kernel = (
            subset is None
            and ignore_index
            and len(self.columns) == 1
            and keep is not False
        )

        if not can_use_unique_kernel and not RangePartitioning.get():
            return super().unique(keep=keep, ignore_index=ignore_index, subset=subset)

        if RangePartitioning.get():
            new_modin_frame = self._modin_frame._apply_func_to_range_partitioning(
                key_columns=self.columns.tolist() if subset is None else subset,
                func=(
                    (
                        lambda df: pandas.DataFrame(
                            df.squeeze(axis=1).unique(), columns=["__reduced__"]
                        )
                    )
                    if can_use_unique_kernel
                    else (
                        lambda df: df.drop_duplicates(
                            keep=keep, ignore_index=ignore_index, subset=subset
                        )
                    )
                ),
                preserve_columns=True,
            )
        else:
            # return self.to_pandas().squeeze(axis=1).unique() works faster
            # but returns pandas type instead of query compiler
            # TODO: https://github.com/modin-project/modin/issues/7182
            new_modin_frame = self._modin_frame.apply_full_axis(
                0,
                lambda x: x.squeeze(axis=1).unique(),
                new_columns=self.columns,
            )
        return self.__constructor__(new_modin_frame, shape_hint=self._shape_hint)
def searchsorted(self, **kwargs):
def searchsorted(df):
"""Apply `searchsorted` function to a single partition."""
result = df.squeeze(axis=1).searchsorted(**kwargs)
if not is_list_like(result):
result = [result]
return pandas.DataFrame(result)
return self.default_to_pandas(searchsorted)
# Dt map partitions operations
dt_date = Map.register(_dt_prop_map("date"), dtypes=np.object_)
dt_time = Map.register(_dt_prop_map("time"), dtypes=np.object_)
dt_timetz = Map.register(_dt_prop_map("timetz"), dtypes=np.object_)
dt_year = Map.register(_dt_prop_map("year"), dtypes=np.int32)
dt_month = Map.register(_dt_prop_map("month"), dtypes=np.int32)
dt_day = Map.register(_dt_prop_map("day"), dtypes=np.int32)
dt_hour = Map.register(_dt_prop_map("hour"), dtypes=np.int64)
dt_minute = Map.register(_dt_prop_map("minute"), dtypes=np.int64)
dt_second = Map.register(_dt_prop_map("second"), dtypes=np.int64)
dt_microsecond = Map.register(_dt_prop_map("microsecond"), dtypes=np.int64)
dt_nanosecond = Map.register(_dt_prop_map("nanosecond"), dtypes=np.int64)
dt_dayofweek = Map.register(_dt_prop_map("dayofweek"), dtypes=np.int64)
dt_weekday = Map.register(_dt_prop_map("weekday"), dtypes=np.int64)
dt_dayofyear = Map.register(_dt_prop_map("dayofyear"), dtypes=np.int64)
dt_quarter = Map.register(_dt_prop_map("quarter"), dtypes=np.int64)
dt_is_month_start = Map.register(_dt_prop_map("is_month_start"), dtypes=np.bool_)
dt_is_month_end = Map.register(_dt_prop_map("is_month_end"), dtypes=np.bool_)
dt_is_quarter_start = Map.register(
_dt_prop_map("is_quarter_start"), dtypes=np.bool_
)
dt_is_quarter_end = Map.register(_dt_prop_map("is_quarter_end"), dtypes=np.bool_)
dt_is_year_start = Map.register(_dt_prop_map("is_year_start"), dtypes=np.bool_)
dt_is_year_end = Map.register(_dt_prop_map("is_year_end"), dtypes=np.bool_)
dt_is_leap_year = Map.register(_dt_prop_map("is_leap_year"), dtypes=np.bool_)
dt_daysinmonth = Map.register(_dt_prop_map("daysinmonth"), dtypes=np.int64)
dt_days_in_month = Map.register(_dt_prop_map("days_in_month"), dtypes=np.int64)
dt_asfreq = Map.register(_dt_func_map("asfreq"))
dt_to_period = Map.register(_dt_func_map("to_period"))
dt_to_pydatetime = Map.register(_dt_func_map("to_pydatetime"), dtypes=np.object_)
dt_tz_localize = Map.register(_dt_func_map("tz_localize"))
dt_tz_convert = Map.register(_dt_func_map("tz_convert"))
dt_normalize = Map.register(_dt_func_map("normalize"))
dt_strftime = Map.register(_dt_func_map("strftime"), dtypes=np.object_)
dt_round = Map.register(_dt_func_map("round"))
dt_floor = Map.register(_dt_func_map("floor"))
dt_ceil = Map.register(_dt_func_map("ceil"))
dt_month_name = Map.register(_dt_func_map("month_name"), dtypes=np.object_)
dt_day_name = Map.register(_dt_func_map("day_name"), dtypes=np.object_)
dt_to_pytimedelta = Map.register(_dt_func_map("to_pytimedelta"), dtypes=np.object_)
dt_total_seconds = Map.register(_dt_func_map("total_seconds"), dtypes=np.float64)
dt_seconds = Map.register(_dt_prop_map("seconds"), dtypes=np.int64)
dt_days = Map.register(_dt_prop_map("days"), dtypes=np.int64)
dt_microseconds = Map.register(_dt_prop_map("microseconds"), dtypes=np.int64)
dt_nanoseconds = Map.register(_dt_prop_map("nanoseconds"), dtypes=np.int64)
dt_qyear = Map.register(_dt_prop_map("qyear"), dtypes=np.int64)
dt_start_time = Map.register(_dt_prop_map("start_time"))
dt_end_time = Map.register(_dt_prop_map("end_time"))
dt_to_timestamp = Map.register(_dt_func_map("to_timestamp"))
# END Dt map partitions operations
def astype(self, col_dtypes, errors: str = "raise"):
# `errors` parameter needs to be part of the function signature because
# other query compilers may not take care of error handling at the API
# layer. This query compiler assumes there won't be any errors due to
# invalid type keys.
return self.__constructor__(
self._modin_frame.astype(col_dtypes, errors=errors),
shape_hint=self._shape_hint,
)
def infer_objects(self):
return self.__constructor__(self._modin_frame.infer_objects())
# Column/Row partitions reduce operations
    def first_valid_index(self):
        """Return the label of the first row that holds any non-NA value."""

        def first_valid_index_builder(df):
            """Get the position of the first valid index in a single partition."""
            # Positional labels make per-partition results comparable.
            return df.set_axis(pandas.RangeIndex(len(df.index)), axis="index").apply(
                lambda df: df.first_valid_index()
            )

        # We get the minimum from each column, then take the min of that to get
        # first_valid_index. The `to_pandas()` here is just for a single value and
        # `squeeze` will convert it to a scalar.
        first_result = (
            self.__constructor__(self._modin_frame.reduce(0, first_valid_index_builder))
            .min(axis=1)
            .to_pandas()
            .squeeze()
        )
        return self.index[first_result]
def last_valid_index(self):
def last_valid_index_builder(df):
"""Get the position of the last valid index in a single partition."""
return df.set_axis(pandas.RangeIndex(len(df.index)), axis="index").apply(
lambda df: df.last_valid_index()
)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(self._modin_frame.reduce(0, last_valid_index_builder))
.max(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
# END Column/Row partitions reduce operations
    def describe(self, percentiles: np.ndarray):
        """
        Generate descriptive statistics for all columns.

        An empty frame with the same dtypes is described first to learn the
        exact result row labels (`new_index`) and column set, so every
        partition can be reindexed consistently.
        """
        # Use pandas to calculate the correct columns
        empty_df = (
            pandas.DataFrame(columns=self.columns)
            .astype(self.dtypes)
            .describe(percentiles, include="all")
        )
        new_index = empty_df.index

        def describe_builder(df, internal_indices=[]):  # pragma: no cover
            """Apply `describe` function to the subset of columns in a single partition."""
            # The index of the resulting dataframe is the same amongst all partitions
            # when dealing with the same data type. However, if we work with columns
            # that contain strings, we can get extra values in our result index such as
            # 'unique', 'top', and 'freq'. Since we call describe() on each partition,
            # we can have cases where certain partitions do not contain any of the
            # object string data leading to an index mismatch between partitions.
            # Thus, we must reindex each partition with the global new_index.
            return (
                df.iloc[:, internal_indices]
                .describe(percentiles=percentiles, include="all")
                .reindex(new_index)
            )

        return self.__constructor__(
            self._modin_frame.apply_full_axis_select_indices(
                0,
                describe_builder,
                empty_df.columns,
                new_index=new_index,
                new_columns=empty_df.columns,
            )
        )
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
cummax = Fold.register(pandas.DataFrame.cummax, shape_preserved=True)
cummin = Fold.register(pandas.DataFrame.cummin, shape_preserved=True)
cumsum = Fold.register(pandas.DataFrame.cumsum, shape_preserved=True)
cumprod = Fold.register(pandas.DataFrame.cumprod, shape_preserved=True)
_diff = Fold.register(pandas.DataFrame.diff, shape_preserved=True)
def diff(self, axis, periods):
return self._diff(fold_axis=axis, axis=axis, periods=periods)
def clip(self, lower, upper, **kwargs):
if isinstance(lower, BaseQueryCompiler):
lower = lower.to_pandas().squeeze(1)
if isinstance(upper, BaseQueryCompiler):
upper = upper.to_pandas().squeeze(1)
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
if is_list_like(lower) or is_list_like(upper):
new_modin_frame = self._modin_frame.fold(
axis, lambda df: df.clip(**kwargs), shape_preserved=True
)
else:
new_modin_frame = self._modin_frame.map(lambda df: df.clip(**kwargs))
return self.__constructor__(new_modin_frame)
corr = CorrCovBuilder.build_corr_method()
def cov(self, min_periods=None, ddof=1):
if self.get_pandas_backend() == "pyarrow":
return super().cov(min_periods=min_periods, ddof=ddof)
# _nancorr use numpy which incompatible with pandas dataframes on pyarrow
return self._nancorr(min_periods=min_periods, cov=True, ddof=ddof)
    def _nancorr(self, min_periods=1, cov=False, ddof=1):
        """
        Compute either pairwise covariance or pairwise correlation of columns.

        This function considers NA/null values the same like pandas does.

        Parameters
        ----------
        min_periods : int, default: 1
            Minimum number of observations required per pair of columns
            to have a valid result.
        cov : boolean, default: False
            Either covariance or correlation should be computed.
        ddof : int, default: 1
            Means Delta Degrees of Freedom. The divisor used in calculations.

        Returns
        -------
        PandasQueryCompiler
            The covariance or correlation matrix.

        Notes
        -----
        This method is only used to compute covariance at the moment.
        """
        # Materialize the full frame once; every row-partition of the
        # transposed self is paired against these columns in `map_func`.
        other = self.to_numpy()
        try:
            other_mask = self._isfinite().to_numpy()
        except TypeError as err:
            # Pandas raises ValueError on unsupported types, so casting
            # the exception to a proper type
            raise ValueError("Unsupported types with 'numeric_only=False'") from err
        n_cols = other.shape[1]

        if min_periods is None:
            min_periods = 1

        def map_func(df):  # pragma: no cover
            """Compute covariance or correlation matrix for the passed frame."""
            df = df.to_numpy()
            n_rows = df.shape[0]
            df_mask = np.isfinite(df)

            result = np.empty((n_rows, n_cols), dtype=np.float64)

            for i in range(n_rows):
                df_ith_row = df[i]
                df_ith_mask = df_mask[i]

                for j in range(n_cols):
                    other_jth_col = other[:, j]

                    # Only positions finite in BOTH series contribute.
                    valid = df_ith_mask & other_mask[:, j]

                    vx = df_ith_row[valid]
                    vy = other_jth_col[valid]

                    nobs = len(vx)

                    if nobs < min_periods:
                        result[i, j] = np.nan
                    else:
                        # Center the samples, then apply the cov/corr formula.
                        vx = vx - vx.mean()
                        vy = vy - vy.mean()
                        sumxy = (vx * vy).sum()
                        sumxx = (vx * vx).sum()
                        sumyy = (vy * vy).sum()

                        denom = (nobs - ddof) if cov else np.sqrt(sumxx * sumyy)
                        if denom != 0:
                            result[i, j] = sumxy / denom
                        else:
                            result[i, j] = np.nan

            return pandas.DataFrame(result)

        columns = self.columns
        index = columns.copy()
        # Transposing lets each row-partition compute one stripe of the
        # (symmetric) result matrix against the full `other` array.
        transponed_self = self.transpose()
        new_modin_frame = transponed_self._modin_frame.apply_full_axis(
            1, map_func, new_index=index, new_columns=columns
        )
        return transponed_self.__constructor__(new_modin_frame)
    def dot(self, other, squeeze_self=None, squeeze_other=None):
        """
        Compute the matrix product of `self` and `other`.

        Parameters
        ----------
        other : PandasQueryCompiler or array-like
            Right operand; a query compiler is materialized to pandas first.
        squeeze_self : bool, optional
            Treat `self` as a Series (squeeze to 1D inside the kernel).
        squeeze_other : bool, optional
            Treat `other` as a Series when materializing it.

        Returns
        -------
        PandasQueryCompiler
        """
        if isinstance(other, PandasQueryCompiler):
            other = (
                other.to_pandas().squeeze(axis=1)
                if squeeze_other
                else other.to_pandas()
            )

        num_cols = other.shape[1] if len(other.shape) > 1 else 1
        if len(self.columns) == 1:
            # Series @ something: the result may itself be scalar/Series
            # shaped, labeled with the unnamed-series sentinel.
            new_index = (
                [MODIN_UNNAMED_SERIES_LABEL]
                if (len(self.index) == 1 or squeeze_self) and num_cols == 1
                else None
            )
            new_columns = (
                [MODIN_UNNAMED_SERIES_LABEL] if squeeze_self and num_cols == 1 else None
            )
            axis = 0
        else:
            new_index = self.index
            new_columns = [MODIN_UNNAMED_SERIES_LABEL] if num_cols == 1 else None
            axis = 1

        # If either new index or new columns are supposed to be a single-dimensional,
        # then we use a special labeling for them. Besides setting the new labels as
        # a metadata to the resulted frame, we also want to set them inside the kernel,
        # so actual partitions would be labeled accordingly (there's a 'sync_label'
        # parameter that can do the same, but doing it manually is faster)
        align_index = isinstance(new_index, list) and new_index == [
            MODIN_UNNAMED_SERIES_LABEL
        ]
        align_columns = new_columns == [MODIN_UNNAMED_SERIES_LABEL]

        def map_func(df, other=other, squeeze_self=squeeze_self):  # pragma: no cover
            """Compute matrix multiplication of the passed frames."""
            result = df.squeeze(axis=1).dot(other) if squeeze_self else df.dot(other)
            if is_list_like(result):
                res = pandas.DataFrame(result)
            else:
                res = pandas.DataFrame([result])

            # manual aligning with external index to avoid `sync_labels` overhead
            if align_columns:
                res.columns = [MODIN_UNNAMED_SERIES_LABEL]
            if align_index:
                res.index = [MODIN_UNNAMED_SERIES_LABEL]
            return res

        new_modin_frame = self._modin_frame.apply_full_axis(
            axis,
            map_func,
            new_index=new_index,
            new_columns=new_columns,
            sync_labels=False,
        )
        return self.__constructor__(new_modin_frame)
    def _nsort(self, n, columns=None, keep="first", sort_type="nsmallest"):
        """
        Return first N rows of the data sorted in the specified order.

        Parameters
        ----------
        n : int
            Number of rows to return.
        columns : list of labels, optional
            Column labels to sort data by.
        keep : {"first", "last", "all"}, default: "first"
            How to pick first rows in case of duplicated values:

            - "first": prioritize first occurrence.
            - "last": prioritize last occurrence.
            - "all": do not drop any duplicates, even if it means selecting more than `n` rows.

        sort_type : {"nsmallest", "nlargest"}, default: "nsmallest"
            "nsmallest" means sort in descending order, "nlargest" means
            sort in ascending order.

        Returns
        -------
        PandasQueryCompiler
            New QueryCompiler containing the first N rows of the data
            sorted in the given order.
        """

        # Defaults bind the current values so the kernel is self-contained
        # when shipped to workers.
        def map_func(df, n=n, keep=keep, columns=columns):  # pragma: no cover
            """Return first `N` rows of the sorted data for a single partition."""
            if columns is None:
                # No sort columns means Series semantics: squeeze first.
                return pandas.DataFrame(
                    getattr(pandas.Series, sort_type)(
                        df.squeeze(axis=1), n=n, keep=keep
                    )
                )
            return getattr(pandas.DataFrame, sort_type)(
                df, n=n, columns=columns, keep=keep
            )

        if columns is None:
            new_columns = [MODIN_UNNAMED_SERIES_LABEL]
        else:
            new_columns = self.columns

        new_modin_frame = self._modin_frame.apply_full_axis(
            axis=0, func=map_func, new_columns=new_columns
        )
        return self.__constructor__(new_modin_frame)
def nsmallest(self, *args, **kwargs):
return self._nsort(sort_type="nsmallest", *args, **kwargs)
def nlargest(self, *args, **kwargs):
return self._nsort(sort_type="nlargest", *args, **kwargs)
    def eval(self, expr, **kwargs):
        """
        Evaluate the string expression `expr` on the frame's columns.

        An empty frame with matching dtypes is evaluated first to learn
        whether the result is Series- or DataFrame-shaped and what its
        column labels are.
        """
        # Make a copy of columns and eval on the copy to determine if result type is
        # series or not
        empty_eval = (
            pandas.DataFrame(columns=self.columns)
            .astype(self.dtypes)
            .eval(expr, inplace=False, **kwargs)
        )
        if isinstance(empty_eval, pandas.Series):
            new_columns = (
                [empty_eval.name]
                if empty_eval.name is not None
                else [MODIN_UNNAMED_SERIES_LABEL]
            )
        else:
            new_columns = empty_eval.columns
        new_modin_frame = self._modin_frame.apply_full_axis(
            1,
            lambda df: pandas.DataFrame(df.eval(expr, inplace=False, **kwargs)),
            new_index=self.index,
            new_columns=new_columns,
        )
        return self.__constructor__(new_modin_frame)
    def mode(self, **kwargs):
        """
        Get the mode(s) of each column/row.

        Partitions are padded to the input's shape so they stay aligned;
        the all-NA padding rows/columns are dropped at the end.
        """
        axis = kwargs.get("axis", 0)

        def mode_builder(df):  # pragma: no cover
            """Compute modes for a single partition."""
            result = pandas.DataFrame(df.mode(**kwargs))
            # We return a dataframe with the same shape as the input to ensure
            # that all the partitions will be the same shape
            if axis == 0 and len(df) != len(result):
                # Pad rows
                result = result.reindex(index=pandas.RangeIndex(len(df.index)))
            elif axis == 1 and len(df.columns) != len(result.columns):
                # Pad columns
                result = result.reindex(columns=pandas.RangeIndex(len(df.columns)))
            return pandas.DataFrame(result)

        if axis == 0:
            new_index = pandas.RangeIndex(len(self.index))
            new_columns = self.columns
        else:
            new_index = self.index
            new_columns = pandas.RangeIndex(len(self.columns))
        new_modin_frame = self._modin_frame.apply_full_axis(
            axis, mode_builder, new_index=new_index, new_columns=new_columns
        )
        return self.__constructor__(new_modin_frame).dropna(axis=axis, how="all")
    def fillna(self, **kwargs):
        """
        Replace NaN values using the provided value/method.

        `value` may be a scalar, a dict, or another query compiler; the
        branch taken determines whether a Map, Fold, n-ary, or broadcast
        kernel is used. `method`/`limit` force a full-axis (Fold) pass since
        they need the whole axis to propagate fills correctly.
        """
        squeeze_self = kwargs.pop("squeeze_self", False)
        squeeze_value = kwargs.pop("squeeze_value", False)
        axis = kwargs.get("axis", 0)
        value = kwargs.pop("value")
        method = kwargs.get("method", None)
        limit = kwargs.get("limit", None)
        full_axis = method is not None or limit is not None
        new_dtypes = None
        if isinstance(value, BaseQueryCompiler):
            # This code assumes that the operation occurs with the same query compiler
            assert isinstance(value, PandasQueryCompiler)
            if squeeze_self:
                # Self is a Series type object
                if full_axis:
                    value = value.to_pandas().squeeze(axis=1)

                    def fillna_builder(series):  # pragma: no cover
                        # `limit` parameter works only on `Series` type, so we have to squeeze both objects to get
                        # correct behavior.
                        return series.squeeze(axis=1).fillna(value=value, **kwargs)

                    new_modin_frame = self._modin_frame.apply_full_axis(
                        0, fillna_builder
                    )
                else:

                    def fillna_builder(df, value_arg):
                        # `value_arg` is the co-partitioned chunk of `value`.
                        if isinstance(value_arg, pandas.DataFrame):
                            value_arg = value_arg.squeeze(axis=1)
                        res = df.squeeze(axis=1).fillna(value=value_arg, **kwargs)
                        return pandas.DataFrame(res)

                    new_modin_frame = self._modin_frame.n_ary_op(
                        fillna_builder,
                        [value._modin_frame],
                        join_type="left",
                        copartition_along_columns=False,
                    )

                return self.__constructor__(new_modin_frame)
            else:
                # Self is a DataFrame type object
                if squeeze_value:
                    # Value is Series type object
                    value = value.to_pandas().squeeze(axis=1)

                    def fillna(df):
                        return df.fillna(value=value, **kwargs)

                    # Continue to end of this function
                else:
                    # Value is a DataFrame type object
                    def fillna_builder(df, right):
                        return df.fillna(value=right, **kwargs)

                    new_modin_frame = self._modin_frame.broadcast_apply(
                        0, fillna_builder, value._modin_frame
                    )
                    return self.__constructor__(new_modin_frame)
        elif isinstance(value, dict):
            if squeeze_self:
                # For Series dict works along the index.
                def fillna(df):
                    return pandas.DataFrame(
                        df.squeeze(axis=1).fillna(value=value, **kwargs)
                    )

            else:
                # For DataFrames dict works along columns, all columns have to be present.
                def fillna(df):
                    func_dict = {
                        col: val for (col, val) in value.items() if col in df.columns
                    }
                    return df.fillna(value=func_dict, **kwargs)

            if self.frame_has_materialized_dtypes:
                # If filling cannot widen any column dtype, the existing
                # dtypes can be carried over without recomputation.
                dtypes = self.dtypes
                value_dtypes = pandas.DataFrame(
                    {k: [v] for (k, v) in value.items()}
                ).dtypes
                if all(
                    find_common_type([dtypes[col], dtype]) == dtypes[col]
                    for (col, dtype) in value_dtypes.items()
                    if col in dtypes
                ):
                    new_dtypes = dtypes
        else:
            if self.frame_has_materialized_dtypes:
                # Scalar fill value: dtypes survive iff the value's dtype is
                # subsumed by every existing column dtype.
                dtype = pandas.Series(value).dtype
                if all(find_common_type([t, dtype]) == t for t in self.dtypes):
                    new_dtypes = self.dtypes

            def fillna(df):
                return df.fillna(value=value, **kwargs)

        if full_axis:
            new_modin_frame = self._modin_frame.fold(axis, fillna, shape_preserved=True)
        else:
            new_modin_frame = self._modin_frame.map(fillna, dtypes=new_dtypes)
        return self.__constructor__(new_modin_frame)
    def quantile_for_list_of_values(self, **kwargs):
        """
        Compute quantiles over the given axis for a list-like `q`.

        The result has one row (or, for ``axis=1``, one column) per requested
        quantile.
        """
        axis = kwargs.get("axis", 0)
        q = kwargs.get("q")
        numeric_only = kwargs.get("numeric_only", True)
        assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list, tuple))
        if numeric_only:
            new_columns = self._modin_frame.numeric_columns()
        else:
            # Besides numeric columns, quantile is also defined for
            # datetime/timedelta ("mM") dtypes.
            new_columns = [
                col
                for col, dtype in zip(self.columns, self.dtypes)
                if (is_numeric_dtype(dtype) or lib.is_np_dtype(dtype, "mM"))
            ]
        if axis == 1:
            query_compiler = self.getitem_column_array(new_columns)
            new_columns = self.index
        else:
            query_compiler = self
        def quantile_builder(df, **kwargs):
            result = df.quantile(**kwargs)
            return result.T if kwargs.get("axis", 0) == 1 else result
        # This took a long time to debug, so here is the rundown of why this is needed.
        # Previously, we were operating on select indices, but that was broken. We were
        # not correctly setting the columns/index. Because of how we compute `to_pandas`
        # and because of the static nature of the index for `axis=1` it is easier to
        # just handle this as the transpose (see `quantile_builder` above for the
        # transpose within the partition) than it is to completely rework other
        # internal methods. Basically we are returning the transpose of the object for
        # correctness and cleanliness of the code.
        if axis == 1:
            q_index = new_columns
            new_columns = pandas.Index(q)
        else:
            q_index = pandas.Index(q)
        new_modin_frame = query_compiler._modin_frame.apply_full_axis(
            axis,
            lambda df: quantile_builder(df, **kwargs),
            new_index=q_index,
            new_columns=new_columns,
            dtypes=np.float64,
        )
        result = self.__constructor__(new_modin_frame)
        return result.transpose() if axis == 1 else result
def rank(self, **kwargs):
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
new_modin_frame = self._modin_frame.apply_full_axis(
axis,
lambda df: df.rank(**kwargs),
new_index=self._modin_frame.copy_index_cache(copy_lengths=True),
new_columns=(
self._modin_frame.copy_columns_cache(copy_lengths=True)
if not numeric_only
else None
),
dtypes=np.float64,
sync_labels=False,
)
return self.__constructor__(new_modin_frame)
    def sort_index(self, **kwargs):
        """Sort the frame by its index (``axis=0``) or column (``axis=1``) labels."""
        axis = kwargs.pop("axis", 0)
        level = kwargs.pop("level", None)
        sort_remaining = kwargs.pop("sort_remaining", True)
        kwargs["inplace"] = False
        # Level-based and MultiIndex sorting is not handled in a distributed
        # fashion here, so fall back to pandas.
        if level is not None or self.has_multiindex(axis=axis):
            return self.default_to_pandas(
                pandas.DataFrame.sort_index,
                axis=axis,
                level=level,
                sort_remaining=sort_remaining,
                **kwargs,
            )
        # sort_index can have ascending be None and behaves as if it is False.
        # sort_values cannot have ascending be None. Thus, the following logic is to
        # convert the ascending argument to one that works with sort_values
        ascending = kwargs.pop("ascending", True)
        if ascending is None:
            ascending = False
        kwargs["ascending"] = ascending
        # Compute the resulting labels up front so the distributed frame's
        # metadata does not have to be inferred from the partitions later.
        if axis:
            new_columns = self.columns.to_frame().sort_index(**kwargs).index
            new_index = self.index
        else:
            new_index = self.index.to_frame().sort_index(**kwargs).index
            new_columns = self.columns
        new_modin_frame = self._modin_frame.apply_full_axis(
            axis,
            lambda df: df.sort_index(
                axis=axis, level=level, sort_remaining=sort_remaining, **kwargs
            ),
            new_index,
            new_columns,
            dtypes="copy" if axis == 0 else None,
        )
        return self.__constructor__(new_modin_frame)
    def melt(
        self,
        id_vars=None,
        value_vars=None,
        var_name=None,
        value_name="value",
        col_level=None,
        ignore_index=True,
    ):
        """Unpivot the frame from wide to long format (see ``pandas.DataFrame.melt``)."""
        ErrorMessage.mismatch_with_pandas(
            operation="melt", message="Order of rows could be different from pandas"
        )
        if var_name is None:
            var_name = "variable"
        def _convert_to_list(x):
            """Convert passed object to a list."""
            if is_list_like(x):
                x = [*x]
            elif x is not None:
                x = [x]
            else:
                x = []
            return x
        id_vars, value_vars = map(_convert_to_list, [id_vars, value_vars])
        if len(value_vars) == 0:
            # By default melt everything that was not declared an id variable.
            value_vars = self.columns.drop(id_vars)
        if len(id_vars) != 0:
            to_broadcast = self.getitem_column_array(id_vars)._modin_frame
        else:
            to_broadcast = None
        def applyier(df, internal_indices, other=[], internal_other_indices=[]):
            """
            Apply `melt` function to a single partition.

            Parameters
            ----------
            df : pandas.DataFrame
                Partition of the self frame.
            internal_indices : list of ints
                Positional indices of columns in this particular partition which
                represents `value_vars` columns in the source frame.
            other : pandas.DataFrame
                Broadcasted partition which contains `id_vars` columns of the
                source frame.
            internal_other_indices : list of ints
                Positional indices of columns in `other` partition which
                represents `id_vars` columns in the source frame.

            Returns
            -------
            pandas.DataFrame
                The result of the `melt` function for this particular partition.
            """
            if len(other):
                other = pandas.concat(other, axis=1)
                columns_to_add = other.columns.difference(df.columns)
                df = pandas.concat([df, other[columns_to_add]], axis=1)
            return df.melt(
                id_vars=id_vars,
                value_vars=df.columns[internal_indices],
                var_name=var_name,
                value_name=value_name,
                col_level=col_level,
            )
        # we have no able to calculate correct indices here, so making it `dummy_index`
        inconsistent_frame = self._modin_frame.broadcast_apply_select_indices(
            axis=0,
            apply_indices=value_vars,
            func=applyier,
            other=to_broadcast,
            new_index=["dummy_index"] * len(id_vars),
            new_columns=["dummy_index"] * len(id_vars),
        )
        # after applying `melt` for selected indices we will get partitions like this:
        #     id_vars  vars  value |  id_vars  vars  value
        # 0     foo    col3    1   | 0   foo   col5    a     so stacking it into
        # 1     fiz    col3    2   | 1   fiz   col5    b     `new_parts` to get
        # 2     bar    col3    3   | 2   bar   col5    c     correct answer
        # 3     zoo    col3    4   | 3   zoo   col5    d
        new_parts = np.array(
            [np.array([x]) for x in np.concatenate(inconsistent_frame._partitions.T)]
        )
        new_index = pandas.RangeIndex(len(self.index) * len(value_vars))
        new_modin_frame = self._modin_frame.__constructor__(
            new_parts,
            index=new_index,
            columns=id_vars + [var_name, value_name],
        )
        result = self.__constructor__(new_modin_frame)
        # this assigment needs to propagate correct indices into partitions
        result.index = new_index
        return result
# END Map across rows/columns
# __getitem__ methods
__getitem_bool = Binary.register(
lambda df, r: df[[r]] if is_scalar(r) else df[r],
join_type="left",
labels="drop",
)
# __setitem__ methods
    def setitem_bool(self, row_loc: PandasQueryCompiler, col_loc, item):
        """
        Set `item` into the rows selected by the boolean mask `row_loc`
        within the columns `col_loc` (i.e. ``df.loc[mask, cols] = item``).
        """
        def _set_item(df, row_loc): # pragma: no cover
            df = df.copy()
            df.loc[row_loc.squeeze(axis=1), col_loc] = item
            return df
        # For a scalar `item` the resulting dtypes can be computed without
        # touching the data: every affected column gets the common type of
        # its old dtype and the item's dtype.
        if self.frame_has_materialized_dtypes and is_scalar(item):
            new_dtypes = self.dtypes.copy()
            old_dtypes = new_dtypes[col_loc]
            item_type = extract_dtype(item)
            if isinstance(old_dtypes, pandas.Series):
                new_dtypes[col_loc] = [
                    find_common_type([dtype, item_type]) for dtype in old_dtypes.values
                ]
            else:
                new_dtypes[col_loc] = find_common_type([old_dtypes, item_type])
        else:
            new_dtypes = None
        new_modin_frame = self._modin_frame.broadcast_apply_full_axis(
            axis=1,
            func=_set_item,
            other=row_loc._modin_frame,
            new_index=self._modin_frame.copy_index_cache(copy_lengths=True),
            new_columns=self._modin_frame.copy_columns_cache(),
            keep_partitioning=False,
            dtypes=new_dtypes,
        )
        return self.__constructor__(new_modin_frame)
# END __setitem__ methods
def __validate_bool_indexer(self, indexer):
if len(indexer) != len(self.index):
raise ValueError(
f"Item wrong length {len(indexer)} instead of {len(self.index)}."
)
if isinstance(indexer, pandas.Series) and not indexer.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
PendingDeprecationWarning,
stacklevel=4,
)
    def getitem_array(self, key):
        """
        Mask the frame with `key`: a boolean indexer selects rows, while a
        list of labels selects columns.
        """
        if isinstance(key, type(self)):
            # here we check for a subset of bool indexers only to simplify the code;
            # there could (potentially) be more of those, but we assume the most frequent
            # ones are just of bool dtype
            if len(key.dtypes) == 1 and is_bool_dtype(key.dtypes.iloc[0]):
                self.__validate_bool_indexer(key.index)
                return self.__getitem_bool(key, broadcast=True, dtypes="copy")
            key = key.to_pandas().squeeze(axis=1)
        if is_bool_indexer(key):
            self.__validate_bool_indexer(key)
            key = check_bool_indexer(self.index, key)
            # We convert to a RangeIndex because getitem_row_array is expecting a list
            # of indices, and RangeIndex will give us the exact indices of each boolean
            # requested.
            key = pandas.RangeIndex(len(self.index))[key]
            if len(key):
                return self.getitem_row_array(key)
            else:
                # An all-False mask produces an empty frame that keeps the
                # original columns.
                return self.from_pandas(
                    pandas.DataFrame(columns=self.columns), type(self._modin_frame)
                )
        else:
            if any(k not in self.columns for k in key):
                raise KeyError(
                    "{} not index".format(
                        str([k for k in key if k not in self.columns]).replace(",", "")
                    )
                )
            return self.getitem_column_array(key)
def getitem_column_array(
self, key, numeric=False, ignore_order=False
) -> PandasQueryCompiler:
shape_hint = "column" if len(key) == 1 else None
if numeric:
if ignore_order and is_list_like(key):
key = np.sort(key)
new_modin_frame = self._modin_frame.take_2d_labels_or_positional(
col_positions=key
)
else:
if ignore_order and is_list_like(key):
key_set = frozenset(key)
key = [col for col in self.columns if col in key_set]
new_modin_frame = self._modin_frame.take_2d_labels_or_positional(
col_labels=key
)
return self.__constructor__(new_modin_frame, shape_hint=shape_hint)
def getitem_row_array(self, key):
return self.__constructor__(
self._modin_frame.take_2d_labels_or_positional(row_positions=key)
)
def setitem(self, axis, key, value):
# Default to pandas for empty frames to avoid complex partitioning issues
if axis == 0 and not self.lazy_row_count and self.get_axis_len(0) == 0:
def do_setitem(df: pandas.DataFrame, key, value) -> pandas.DataFrame:
df[key] = value
return df
return self.default_to_pandas(do_setitem, key=key, value=value)
if axis == 0:
value = self._wrap_column_data(value)
return self._setitem(axis=axis, key=key, value=value, how=None)
    def _setitem(self, axis, key, value, how="inner"):
        """
        Set the row/column defined by `key` to the `value` provided.

        In contrast with `setitem` with this function you can specify how
        to handle non-aligned `self` and `value`.

        Parameters
        ----------
        axis : {0, 1}
            Axis to set `value` along. 0 means set row, 1 means set column.
        key : scalar
            Row/column label to set `value` in.
        value : PandasQueryCompiler (1xN), list-like or scalar
            Define new row/column value.
        how : {"inner", "outer", "left", "right", None}, default: "inner"
            Type of join to perform if specified axis of `self` and `value` are not
            equal. If `how` is `None`, reindex `value` with `self` labels without joining.

        Returns
        -------
        BaseQueryCompiler
            New QueryCompiler with updated `key` value.
        """
        def setitem_builder(df, internal_indices=[]): # pragma: no cover
            """
            Set the row/column to the `value` in a single partition.

            Parameters
            ----------
            df : pandas.DataFrame
                Partition of the self frame.
            internal_indices : list of ints
                Positional indices of rows/columns in this particular partition
                which represents `key` in the source frame.

            Returns
            -------
            pandas.DataFrame
                Partition data with updated values.
            """
            df = df.copy()
            if len(internal_indices) == 1:
                if axis == 0:
                    df[df.columns[internal_indices[0]]] = value
                else:
                    df.iloc[internal_indices[0]] = value
            else:
                if axis == 0:
                    df[df.columns[internal_indices]] = value
                else:
                    df.iloc[internal_indices] = value
            return df
        if isinstance(value, type(self)):
            # NOTE(review): this renames the column of the passed-in query
            # compiler in place — confirm callers do not reuse `value` after.
            value.columns = [key]
            if axis == 1:
                value = value.transpose()
            idx = self.get_axis(axis ^ 1).get_indexer_for([key])[0]
            return self.insert_item(axis ^ 1, idx, value, how, replace=True)
        if axis == 0:
            # Setting a column: the result keeps the old dtypes except for the
            # updated column, which takes the dtype of `value`.
            value_dtype = extract_dtype(value)
            old_columns = self.columns.difference(pandas.Index([key]))
            old_dtypes = ModinDtypes(self._modin_frame._dtypes).lazy_get(old_columns)
            new_dtypes = ModinDtypes.concat(
                [
                    old_dtypes,
                    DtypesDescriptor({key: value_dtype}, cols_with_unknown_dtypes=[]),
                ]
                # get dtypes in a proper order
            ).lazy_get(self.columns)
        else:
            # TODO: apply 'find_common_dtype' to the value's dtype and old column dtypes
            new_dtypes = None
        # TODO: rework by passing list-like values to `apply_select_indices`
        # as an item to distribute
        if is_list_like(value):
            new_modin_frame = self._modin_frame.apply_full_axis_select_indices(
                axis,
                setitem_builder,
                [key],
                new_index=self.index,
                new_columns=self.columns,
                keep_remaining=True,
                new_dtypes=new_dtypes,
            )
        else:
            new_modin_frame = self._modin_frame.apply_select_indices(
                axis,
                setitem_builder,
                [key],
                new_index=self.index,
                new_columns=self.columns,
                new_dtypes=new_dtypes,
                keep_remaining=True,
            )
        return self.__constructor__(new_modin_frame)
# END __getitem__ methods
# Drop/Dropna
# This will change the shape of the resulting data.
    def dropna(self, **kwargs):
        """
        Remove rows (``axis=0``) or columns (``axis=1``) with missing values.

        Column-wise drops without a `thresh` use a distributed map-reduce
        scheme; everything else goes through a generic full-axis filter.
        """
        is_column_wise = kwargs.get("axis", 0) == 1
        no_thresh_passed = kwargs.get("thresh", lib.no_default) in (
            lib.no_default,
            None,
        )
        # The map reduce approach works well for frames with few columnar partitions
        processable_amount_of_partitions = (
            self._modin_frame.num_parts < CpuCount.get() * 32
        )
        if is_column_wise and no_thresh_passed and processable_amount_of_partitions:
            how = kwargs.get("how", "any")
            subset = kwargs.get("subset")
            how = "any" if how in (lib.no_default, None) else how
            condition = lambda df: getattr(df, how)() # noqa: E731 (lambda assignment)
            def mapper(df: pandas.DataFrame):
                """Compute a mask indicating whether there are all/any NaN values in each column."""
                if subset is not None:
                    subset_mask = condition(
                        df.loc[df.index.intersection(subset)].isna()
                    )
                    # we have to keep other columns so setting their mask
                    # values with `False`
                    mask = pandas.Series(
                        np.zeros(df.shape[1], dtype=bool), index=df.columns
                    )
                    mask.update(subset_mask)
                else:
                    mask = condition(df.isna())
                # for proper partitioning at the 'reduce' phase each partition has to
                # represent a one-row frame rather than a one-column frame, so calling `.T` here
                return mask.to_frame().T
            masks = self._modin_frame.apply_full_axis(
                func=mapper, axis=1, keep_partitioning=True
            )
            def reduce(df: pandas.DataFrame, mask: pandas.DataFrame):
                """Drop columns from `df` that satisfy the NaN `mask`."""
                # `mask` here consists of several rows each representing the masks result
                # for a certain row partition:
                #      col1   col2   col3
                # 0    True   True   False                         col1  True
                # 1    False  True   False  ---> mask.any() --->   col2  True
                # 2    True   True   False                         col3  False
                # in order to get the proper 1D mask we have to reduce the partition's
                # results by applying the condition one more time
                to_take_mask = ~condition(mask)
                to_take = []
                for col, value in to_take_mask.items():
                    if value and col in df:
                        to_take.append(col)
                return df[to_take]
            result = self._modin_frame.broadcast_apply(
                # 'masks' have identical partitioning as we specified 'keep_partitioning=True' before,
                # this means that we can safely skip the 'co-partitioning' stage
                axis=1,
                func=reduce,
                other=masks,
                copartition=False,
                labels="drop",
            )
            return self.__constructor__(result, shape_hint=self._shape_hint)
        return self.__constructor__(
            self._modin_frame.filter(
                kwargs.get("axis", 0) ^ 1,
                lambda df: pandas.DataFrame.dropna(df, **kwargs),
            ),
            shape_hint=self._shape_hint,
        )
def drop(
self, index=None, columns=None, errors: str = "raise"
) -> PandasQueryCompiler:
# `errors` parameter needs to be part of the function signature because
# other query compilers may not take care of error handling at the API
# layer. This query compiler assumes there won't be any errors due to
# invalid keys.
if index is not None:
index = np.sort(self.index.get_indexer_for(self.index.difference(index)))
if columns is not None:
columns = np.sort(
self.columns.get_indexer_for(self.columns.difference(columns))
)
new_modin_frame = self._modin_frame.take_2d_labels_or_positional(
row_positions=index, col_positions=columns
)
return self.__constructor__(new_modin_frame)
# END Drop/Dropna
    def duplicated(self, **kwargs):
        """Return a boolean single-column query compiler marking duplicated rows."""
        def _compute_hash(df):
            # md5 is used purely as a row fingerprint (not for security): it
            # collapses a row that spans several column partitions into one
            # comparable string.
            result = df.apply(
                lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
            )
            if isinstance(result, pandas.Series):
                result = result.to_frame(
                    result.name
                    if result.name is not None
                    else MODIN_UNNAMED_SERIES_LABEL
                )
            return result
        def _compute_duplicated(df): # pragma: no cover
            # Mark duplicates within a single (possibly pre-hashed) column.
            result = df.duplicated(**kwargs)
            if isinstance(result, pandas.Series):
                result = result.to_frame(
                    result.name
                    if result.name is not None
                    else MODIN_UNNAMED_SERIES_LABEL
                )
            return result
        if self._modin_frame._partitions.shape[1] > 1:
            # if the number of columns (or column partitions) we are checking for duplicates is larger than 1,
            # we must first hash them to generate a single value that can be compared across rows.
            hashed_modin_frame = self._modin_frame.reduce(
                axis=1,
                function=_compute_hash,
                dtypes=pandas.api.types.pandas_dtype("O"),
            )
        else:
            hashed_modin_frame = self._modin_frame
        new_modin_frame = hashed_modin_frame.apply_full_axis(
            axis=0,
            func=_compute_duplicated,
            new_index=self._modin_frame.copy_index_cache(),
            new_columns=[MODIN_UNNAMED_SERIES_LABEL],
            dtypes=np.bool_,
            keep_partitioning=True,
        )
        return self.__constructor__(new_modin_frame, shape_hint="column")
# Insert
# This method changes the shape of the resulting data. In Pandas, this
# operation is always inplace, but this object is immutable, so we just
# return a new one from here and let the front end handle the inplace
# update.
    def insert(self, loc, column, value):
        """Insert `value` as a new column named `column` at position `loc`."""
        value = self._wrap_column_data(value)
        if isinstance(value, type(self)):
            value.columns = [column]
            return self.insert_item(axis=1, loc=loc, value=value, how=None)
        # NOTE: the nested function deliberately shadows the method name; it
        # is only ever used as the kernel passed below.
        def insert(df, internal_indices=[]): # pragma: no cover
            """
            Insert new column to the partition.

            Parameters
            ----------
            df : pandas.DataFrame
                Partition of the self frame.
            internal_indices : list of ints
                Positional index of the column in this particular partition
                to insert new column after.
            """
            internal_idx = int(internal_indices[0])
            df.insert(internal_idx, column, value)
            return df
        value_dtype = extract_dtype(value)
        new_columns = self.columns.insert(loc, column)
        new_dtypes = ModinDtypes.concat(
            [
                self._modin_frame._dtypes,
                DtypesDescriptor({column: value_dtype}, cols_with_unknown_dtypes=[]),
            ]
        ).lazy_get(
            new_columns
        ) # get dtypes in a proper order
        # TODO: rework by passing list-like values to `apply_select_indices`
        # as an item to distribute
        new_modin_frame = self._modin_frame.apply_full_axis_select_indices(
            0,
            insert,
            numeric_indices=[loc],
            keep_remaining=True,
            new_index=self.index,
            new_columns=new_columns,
            new_dtypes=new_dtypes,
        )
        return self.__constructor__(new_modin_frame)
def _wrap_column_data(self, data):
"""
If the data is list-like, create a single column query compiler.
Parameters
----------
data : any
Returns
-------
data or PandasQueryCompiler
"""
if is_list_like(data):
return self.from_pandas(
pandas.DataFrame(pandas.Series(data, index=self.index)),
data_cls=type(self._modin_frame),
)
return data
# END Insert
def explode(self, column):
return self.__constructor__(
self._modin_frame.explode(1, lambda df: df.explode(column))
)
# UDF (apply and agg) methods
# There is a wide range of behaviors that are supported, so a lot of the
# logic can get a bit convoluted.
def apply(self, func, axis, *args, **kwargs):
# if any of args contain modin object, we should
# convert it to pandas
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
_, func, _, _ = reconstruct_func(func, **kwargs)
if isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
else:
return self._callable_func(func, axis, *args, **kwargs)
def apply_on_series(self, func, *args, **kwargs):
args = try_cast_to_pandas(args)
kwargs = try_cast_to_pandas(kwargs)
assert self.is_series_like()
# We use apply_full_axis here instead of map since the latter assumes that the
# shape of the DataFrame does not change. However, it is possible for functions
# applied to Series objects to end up creating DataFrames. It is possible that
# using apply_full_axis is much less performant compared to using a variant of
# map.
return self.__constructor__(
self._modin_frame.apply_full_axis(
1, lambda df: df.squeeze(axis=1).apply(func, *args, **kwargs)
)
)
    def _dict_func(self, func, axis, *args, **kwargs):
        """
        Apply passed functions to the specified rows/columns.

        Parameters
        ----------
        func : dict(label) -> [callable, str]
            Dictionary that maps axis labels to the function to apply against them.
        axis : {0, 1}
            Target axis to apply functions along. 0 means apply to columns,
            1 means apply to rows.
        *args : args
            Arguments to pass to the specified functions.
        **kwargs : kwargs
            Arguments to pass to the specified functions.

        Returns
        -------
        PandasQueryCompiler
            New QueryCompiler containing the results of passed functions.
        """
        if "axis" not in kwargs:
            kwargs["axis"] = axis
        # Wrap user-provided callables (see `wrap_udf_function`) before distributing.
        func = {k: wrap_udf_function(v) if callable(v) else v for k, v in func.items()}
        def dict_apply_builder(df, internal_indices=[]): # pragma: no cover
            # Sometimes `apply` can return a `Series`, but we require that internally
            # all objects are `DataFrame`s.
            # It looks like it doesn't need to use `internal_indices` option internally
            # for the case since `apply` use labels from dictionary keys in `func` variable.
            return pandas.DataFrame(df.apply(func, *args, **kwargs))
        labels = list(func.keys())
        return self.__constructor__(
            self._modin_frame.apply_full_axis_select_indices(
                axis,
                dict_apply_builder,
                labels,
                new_index=labels if axis == 1 else None,
                new_columns=labels if axis == 0 else None,
                keep_remaining=False,
            )
        )
    def _list_like_func(self, func, axis, *args, **kwargs):
        """
        Apply passed functions to each row/column.

        Parameters
        ----------
        func : list of callable
            List of functions to apply against each row/column.
        axis : {0, 1}
            Target axis to apply functions along. 0 means apply to columns,
            1 means apply to rows.
        *args : args
            Arguments to pass to the specified functions.
        **kwargs : kwargs
            Arguments to pass to the specified functions.

        Returns
        -------
        PandasQueryCompiler
            New QueryCompiler containing the results of passed functions.
        """
        # When the function is list-like, the function names become the index/columns
        new_index = (
            [f if isinstance(f, str) else f.__name__ for f in func]
            if axis == 0
            else self.index
        )
        new_columns = (
            [f if isinstance(f, str) else f.__name__ for f in func]
            if axis == 1
            else self.columns
        )
        func = [wrap_udf_function(f) if callable(f) else f for f in func]
        # NOTE(review): extra positional `*args` are forwarded positionally to
        # `pandas.DataFrame.apply` (binding to its own parameters such as
        # `raw`), not to the applied functions — confirm callers only pass
        # extra arguments via kwargs.
        new_modin_frame = self._modin_frame.apply_full_axis(
            axis,
            lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)),
            new_index=new_index,
            new_columns=new_columns,
        )
        return self.__constructor__(new_modin_frame)
def rowwise_query(self, expr, **kwargs):
"""
Query the columns of a ``PandasQueryCompiler`` with a boolean row-wise expression.
Basically, in row-wise expressions we only allow column names, constants
and other variables captured using the '@' symbol. No function/method
cannot be called inside such expressions.
Parameters
----------
expr : str
Row-wise boolean expression.
**kwargs : dict
Arguments to pass to the ``pandas.DataFrame.query()``.
Returns
-------
PandasQueryCompiler
Raises
------
NotImplementedError
In case the passed expression cannot be executed row-wise.
"""
# Walk through the AST and verify it doesn't contain any nodes that
# prevent us from executing the query row-wise (we're basically
# looking for 'ast.Call')
nodes = ast.parse(expr.replace("@", "")).body
is_row_wise_query = True
while nodes:
node = nodes.pop()
if isinstance(node, ast.Expr):
node = getattr(node, "value", node)
if isinstance(node, ast.UnaryOp):
nodes.append(node.operand)
elif isinstance(node, ast.BinOp):
nodes.extend([node.left, node.right])
elif isinstance(node, ast.BoolOp):
nodes.extend(node.values)
elif isinstance(node, ast.Compare):
nodes.extend([node.left] + node.comparators)
elif isinstance(node, (ast.Name, ast.Constant)):
pass
else:
# if we end up here then the expression is no longer simple
# enough to run it row-wise, so exiting
is_row_wise_query = False
break
if not is_row_wise_query:
raise NotImplementedError("A non row-wise query was passed.")
def query_builder(df, **modin_internal_kwargs):
return df.query(expr, inplace=False, **kwargs, **modin_internal_kwargs)
return self.__constructor__(self._modin_frame.filter(1, query_builder))
def _callable_func(self, func, axis, *args, **kwargs):
"""
Apply passed function to each row/column.
Parameters
----------
func : callable or str
Function to apply.
axis : {0, 1}
Target axis to apply function along. 0 means apply to columns,
1 means apply to rows.
*args : args
Arguments to pass to the specified function.
**kwargs : kwargs
Arguments to pass to the specified function.
Returns
-------
PandasQueryCompiler
New QueryCompiler containing the results of passed function
for each row/column.
"""
if callable(func):
func = wrap_udf_function(func)
new_modin_frame = self._modin_frame.apply_full_axis(
axis, lambda df: df.apply(func, axis=axis, *args, **kwargs)
)
return self.__constructor__(new_modin_frame)
# END UDF
# Manual Partitioning methods (e.g. merge, groupby)
# These methods require some sort of manual partitioning due to their
# nature. They require certain data to exist on the same partition, and
# after the shuffle, there should be only a local map required.
    def _groupby_separate_by(self, by, drop):
        """
        Separate internal and external groupers in `by` argument of groupby.

        Parameters
        ----------
        by : BaseQueryCompiler, column or index label, Grouper or list
        drop : bool
            Indicates whether or not by data came from self frame.
            True, by data came from self. False, external by data.

        Returns
        -------
        external_by : list of BaseQueryCompiler and arrays
            Values to group by.
        internal_by : list of str
            List of column names from `self` to group by.
        by_positions : list of ints
            Specifies the order of grouping by `internal_by` and `external_by` columns.
            Each element in `by_positions` specifies an index from either `external_by` or `internal_by`.
            Indices for `external_by` are positive and start from 0. Indices for `internal_by` are negative
            and start from -1 (so in order to convert them to a valid indices one should do ``-idx - 1``)
            '''
            by_positions = [0, -1, 1, -2, 2, 3]
            internal_by = ["col1", "col2"]
            external_by = [sr1, sr2, sr3, sr4]

            df.groupby([sr1, "col1", sr2, "col2", sr3, sr4])
            '''.
        """
        if isinstance(by, type(self)):
            if drop:
                # Every grouping key is a column of `self`.
                internal_by = by.columns.tolist()
                external_by = []
                by_positions = [-i - 1 for i in range(len(internal_by))]
            else:
                # The whole grouper is external data.
                internal_by = []
                external_by = [by]
                by_positions = [i for i in range(len(external_by[0].columns))]
        else:
            if not isinstance(by, list):
                by = [by] if by is not None else []
            internal_by = []
            external_by = []
            external_by_counter = 0
            by_positions = []
            for o in by:
                if isinstance(o, pandas.Grouper) and o.key in self.columns:
                    internal_by.append(o.key)
                    by_positions.append(-len(internal_by))
                elif hashable(o) and o in self.columns:
                    internal_by.append(o)
                    by_positions.append(-len(internal_by))
                else:
                    external_by.append(o)
                    # A query-compiler grouper contributes one position per
                    # column; anything else contributes exactly one.
                    for _ in range(len(o.columns) if isinstance(o, type(self)) else 1):
                        by_positions.append(external_by_counter)
                        external_by_counter += 1
        return external_by, internal_by, by_positions
groupby_all = GroupbyReduceImpl.build_qc_method("all")
groupby_any = GroupbyReduceImpl.build_qc_method("any")
groupby_count = GroupbyReduceImpl.build_qc_method("count")
groupby_max = GroupbyReduceImpl.build_qc_method("max")
groupby_min = GroupbyReduceImpl.build_qc_method("min")
groupby_prod = GroupbyReduceImpl.build_qc_method("prod")
groupby_sum = GroupbyReduceImpl.build_qc_method("sum")
groupby_skew = GroupbyReduceImpl.build_qc_method("skew")
def groupby_nth(
self,
by,
axis,
groupby_kwargs,
agg_args,
agg_kwargs,
drop=False,
):
result = super().groupby_nth(
by, axis, groupby_kwargs, agg_args, agg_kwargs, drop
)
if not groupby_kwargs.get("as_index", True):
# pandas keeps order of columns intact, follow suit
return result.getitem_column_array(self.columns)
return result
    def groupby_mean(self, by, axis, groupby_kwargs, agg_args, agg_kwargs, drop=False):
        """Compute the mean of each group, preferring a distributed implementation."""
        if RangePartitioning.get():
            try:
                return self._groupby_shuffle(
                    by=by,
                    agg_func="mean",
                    axis=axis,
                    groupby_kwargs=groupby_kwargs,
                    agg_args=agg_args,
                    agg_kwargs=agg_kwargs,
                    drop=drop,
                )
            except NotImplementedError as e:
                ErrorMessage.warn(
                    f"Can't use range-partitioning groupby implementation because of: {e}"
                    + "\nFalling back to a TreeReduce implementation."
                )
        _, internal_by, _ = self._groupby_separate_by(by, drop)
        numeric_only = agg_kwargs.get("numeric_only", False)
        # Datetime columns cannot be averaged directly by the TreeReduce mean,
        # so they are temporarily viewed as int64 values.
        datetime_cols = (
            {
                col: dtype
                for col, dtype in zip(self.dtypes.index, self.dtypes)
                if is_datetime64_any_dtype(dtype) and col not in internal_by
            }
            if not numeric_only
            else dict()
        )
        if len(datetime_cols) > 0:
            datetime_qc = self.getitem_array(datetime_cols)
            if datetime_qc.isna().any().any(axis=1).to_pandas().squeeze():
                # Missing datetime values would be averaged as integers and
                # corrupt the result, so defer to the default implementation.
                return super().groupby_mean(
                    by=by,
                    axis=axis,
                    groupby_kwargs=groupby_kwargs,
                    agg_args=agg_args,
                    agg_kwargs=agg_kwargs,
                    drop=drop,
                )
        qc_with_converted_datetime_cols = (
            self.astype({col: "int64" for col in datetime_cols.keys()})
            if len(datetime_cols) > 0
            else self
        )
        result = GroupbyReduceImpl.build_qc_method("mean")(
            query_compiler=qc_with_converted_datetime_cols,
            by=by,
            axis=axis,
            groupby_kwargs=groupby_kwargs,
            agg_args=agg_args,
            agg_kwargs=agg_kwargs,
            drop=drop,
        )
        if len(datetime_cols) > 0:
            # Cast the averaged integer values back to the original dtypes.
            result = result.astype({col: dtype for col, dtype in datetime_cols.items()})
        return result
    def groupby_size(
        self,
        by,
        axis,
        groupby_kwargs,
        agg_args,
        agg_kwargs,
        drop=False,
    ):
        """Compute the number of rows in each group."""
        if RangePartitioning.get():
            try:
                return self._groupby_shuffle(
                    by=by,
                    agg_func="size",
                    axis=axis,
                    groupby_kwargs=groupby_kwargs,
                    agg_args=agg_args,
                    agg_kwargs=agg_kwargs,
                    drop=drop,
                )
            except NotImplementedError as e:
                ErrorMessage.warn(
                    f"Can't use range-partitioning groupby implementation because of: {e}"
                    + "\nFalling back to a TreeReduce implementation."
                )
        # `size` is expressed as a renamed aggregation over the first column
        # so the dict-reduce machinery can be reused.
        result = self._groupby_dict_reduce(
            by=by,
            axis=axis,
            agg_func={self.columns[0]: [("__size_col__", "size")]},
            agg_args=agg_args,
            agg_kwargs=agg_kwargs,
            groupby_kwargs=groupby_kwargs,
            drop=drop,
            method="size",
            default_to_pandas_func=lambda grp: grp.size(),
        )
        if groupby_kwargs.get("as_index", True):
            result.columns = [MODIN_UNNAMED_SERIES_LABEL]
        elif isinstance(result.columns, pandas.MultiIndex):
            # Dropping one extra-level which was added because of renaming aggregation
            result.columns = (
                result.columns[:-1].droplevel(-1).append(pandas.Index(["size"]))
            )
        return result
    def _groupby_dict_reduce(
        self,
        by,
        agg_func,
        axis,
        groupby_kwargs,
        agg_args,
        agg_kwargs,
        drop=False,
        **kwargs,
    ):
        """
        Group underlying data and apply aggregation functions to each group of the specified column/row.

        This method is responsible of performing dictionary groupby aggregation for such functions,
        that can be implemented via TreeReduce approach.

        Parameters
        ----------
        by : PandasQueryCompiler, column or index label, Grouper or list of such
            Object that determine groups.
        agg_func : dict(label) -> str
            Dictionary that maps row/column labels to the function names.
            **Note:** specified functions have to be supported by ``modin.core.dataframe.algebra.GroupByReduce``.
            Supported functions are listed in the ``modin.core.dataframe.algebra.GroupByReduce.groupby_reduce_functions``
            dictionary.
        axis : {0, 1}
            Axis to group and apply aggregation function along.
            0 is for index, when 1 is for columns.
        groupby_kwargs : dict
            GroupBy parameters in the format of ``modin.pandas.DataFrame.groupby`` signature.
        agg_args : list-like
            Serves the compatibility purpose. Does not affect the result.
        agg_kwargs : dict
            Arguments to pass to the aggregation functions.
        drop : bool, default: False
            If `by` is a QueryCompiler indicates whether or not by-data came
            from the `self`.
        **kwargs : dict
            Additional parameters to pass to the ``modin.core.dataframe.algebra.GroupByReduce.register``.

        Returns
        -------
        PandasQueryCompiler
            New QueryCompiler containing the result of groupby dictionary aggregation.
        """
        map_dict = {}
        reduce_dict = {}
        kwargs.setdefault(
            "default_to_pandas_func",
            lambda grp, *args, **kwargs: grp.agg(agg_func, *args, **kwargs),
        )
        # Renaming aggregations look like {"col": [("alias", "sum")]}; their
        # presence forces the per-function result columns to be renamed.
        rename_columns = any(
            not isinstance(fn, str) and isinstance(fn, Iterable)
            for fn in agg_func.values()
        )
        for col, col_funcs in agg_func.items():
            if not rename_columns:
                map_dict[col], reduce_dict[col], _ = GroupbyReduceImpl.get_impl(
                    col_funcs
                )
                continue
            if isinstance(col_funcs, str):
                col_funcs = [col_funcs]
            map_fns = []
            for i, fn in enumerate(col_funcs):
                if not isinstance(fn, str) and isinstance(fn, Iterable):
                    # A renaming aggregation: ("new_name", function).
                    new_col_name, func = fn
                elif isinstance(fn, str):
                    new_col_name, func = fn, fn
                else:
                    raise TypeError
                map_fn, reduce_fn, _ = GroupbyReduceImpl.get_impl(func)
                map_fns.append((new_col_name, map_fn))
                reduced_col_name = (
                    (*col, new_col_name)
                    if isinstance(col, tuple)
                    else (col, new_col_name)
                )
                reduce_dict[reduced_col_name] = reduce_fn
            map_dict[col] = map_fns
        return GroupByReduce.register(map_dict, reduce_dict, **kwargs)(
            query_compiler=self,
            by=by,
            axis=axis,
            groupby_kwargs=groupby_kwargs,
            agg_args=agg_args,
            agg_kwargs=agg_kwargs,
            drop=drop,
        )
def groupby_dtypes(
self,
by,
axis,
groupby_kwargs,
agg_args,
agg_kwargs,
drop=False,
):
return self.groupby_agg(
by=by,
axis=axis,
agg_func=lambda df: df.dtypes,
how="group_wise",
agg_args=agg_args,
agg_kwargs=agg_kwargs,
groupby_kwargs=groupby_kwargs,
drop=drop,
)
    @_inherit_docstrings(BaseQueryCompiler.groupby_agg)
    def _groupby_shuffle(
        self,
        by,
        agg_func,
        axis,
        groupby_kwargs,
        agg_args,
        agg_kwargs,
        drop=False,
        how="axis_wise",
        series_groupby=False,
    ):
        # Range-partitioning (shuffle-based) groupby implementation. Raises
        # NotImplementedError for the parameter combinations it cannot handle,
        # letting the caller fall back to another implementation.

        # Defaulting to pandas in case of an empty frame as we can't process it properly.
        # Higher API level won't pass empty data here unless the frame has delayed
        # computations. FIXME: We apparently lose some laziness here (due to index access)
        # because of the inability to process empty groupby natively.
        if len(self.columns) == 0 or len(self._modin_frame) == 0:
            return super().groupby_agg(
                by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, how, drop
            )

        # Grouping on an index level means there are no physical 'by' columns to separate.
        grouping_on_level = groupby_kwargs.get("level") is not None
        if any(
            isinstance(obj, pandas.Grouper)
            for obj in (by if isinstance(by, list) else [by])
        ):
            raise NotImplementedError(
                "Grouping on a pandas.Grouper with range-partitioning groupby is not yet supported: "
                + "https://github.com/modin-project/modin/issues/5926"
            )

        if grouping_on_level:
            external_by, internal_by, by_positions = [], [], []
        else:
            external_by, internal_by, by_positions = self._groupby_separate_by(by, drop)

        # Only query-compiler (Series-like) external groupers are supported;
        # note all([]) is True, so the level-grouping case passes trivially.
        all_external_are_qcs = all(isinstance(obj, type(self)) for obj in external_by)
        if not all_external_are_qcs:
            raise NotImplementedError(
                "Grouping on an external grouper with range-partitioning groupby is only supported with Series'es: "
                + "https://github.com/modin-project/modin/issues/5926"
            )

        is_transform = how == "transform" or GroupBy.is_transformation_kernel(agg_func)

        if is_transform:
            # https://github.com/modin-project/modin/issues/5924
            ErrorMessage.mismatch_with_pandas(
                operation="range-partitioning groupby",
                message="the order of rows may be shuffled for the result",
            )

        # This check materializes dtypes for 'by' columns
        if not is_transform and groupby_kwargs.get("observed", False) in (
            False,
            lib.no_default,
        ):
            # The following 'dtypes' check materializes dtypes for 'by' columns
            internal_dtypes = pandas.Series()
            external_dtypes = pandas.Series()
            if len(internal_by) > 0:
                # Prefer the lazy dtype cache when available to avoid full materialization.
                internal_dtypes = (
                    self._modin_frame._dtypes.lazy_get(internal_by).get()
                    if isinstance(self._modin_frame._dtypes, ModinDtypes)
                    else self.dtypes[internal_by]
                )
            if len(external_by) > 0:
                dtypes_list = []
                for obj in external_by:
                    if not isinstance(obj, type(self)):
                        # we're only interested in categorical dtypes here, which can only
                        # appear in modin objects
                        continue
                    dtypes_list.append(obj.dtypes)
                external_dtypes = pandas.concat(dtypes_list)

            by_dtypes = pandas.concat([internal_dtypes, external_dtypes])
            # Categorical 'by' with observed=False requires re-adding unobserved categories.
            add_missing_cats = any(
                isinstance(dtype, pandas.CategoricalDtype) for dtype in by_dtypes
            )
        else:
            add_missing_cats = False

        if add_missing_cats and not groupby_kwargs.get("as_index", True):
            raise NotImplementedError(
                "Range-partitioning groupby is not implemented for grouping on categorical columns with "
                + "the following set of parameters {'as_index': False, 'observed': False}. Change either 'as_index' "
                + "or 'observed' to True and try again. "
                + "https://github.com/modin-project/modin/issues/5926"
            )

        if isinstance(agg_func, dict):
            assert (
                how == "axis_wise"
            ), f"Only 'axis_wise' aggregation is supported with dictionary functions, got: {how}"
            # Narrow the frame to the 'by' columns plus the aggregated columns.
            subset = internal_by + list(agg_func.keys())
            # extracting unique values; no we can't use np.unique here as it would
            # convert a list of tuples to a 2D matrix and so mess up the result
            subset = list(dict.fromkeys(subset))
            obj = self.getitem_column_array(subset)
        else:
            obj = self

        agg_method = (
            SeriesGroupByDefault if series_groupby else GroupByDefault
        ).get_aggregation_method(how)
        original_agg_func = agg_func

        # Wrap the user's function so every partition result is a DataFrame.
        def agg_func(grp, *args, **kwargs):
            result = agg_method(grp, original_agg_func, *args, **kwargs)

            # Convert Series to DataFrame
            if result.ndim == 1:
                result = result.to_frame(
                    MODIN_UNNAMED_SERIES_LABEL if result.name is None else result.name
                )
            return result

        result = obj._modin_frame.groupby(
            axis=axis,
            internal_by=internal_by,
            external_by=[
                obj._modin_frame if isinstance(obj, type(self)) else obj
                for obj in external_by
            ],
            by_positions=by_positions,
            series_groupby=series_groupby,
            operator=lambda grp: agg_func(grp, *agg_args, **agg_kwargs),
            # UDFs passed to '.apply()' are allowed to produce results with arbitrary shapes,
            # that's why we have to align the partition's shapes/labeling across different
            # row partitions
            align_result_columns=how == "group_wise",
            add_missing_cats=add_missing_cats,
            **groupby_kwargs,
        )
        result_qc: PandasQueryCompiler = self.__constructor__(result)

        if not is_transform and not groupby_kwargs.get("as_index", True):
            return result_qc.reset_index(drop=True)

        return result_qc
def groupby_corr(
self,
by,
axis,
groupby_kwargs,
agg_args,
agg_kwargs,
drop=False,
):
ErrorMessage.default_to_pandas("`GroupBy.corr`")
# TODO(https://github.com/modin-project/modin/issues/1323) implement this.
# Right now, using this class's groupby_agg method, even with how="group_wise",
# produces a result with the wrong index, so default to pandas by using the
# super class's groupby_agg method.
return super().groupby_agg(
by=by,
agg_func="corr",
axis=axis,
groupby_kwargs=groupby_kwargs,
agg_args=agg_args,
agg_kwargs=agg_kwargs,
drop=drop,
)
def groupby_cov(
self,
by,
axis,
groupby_kwargs,
agg_args,
agg_kwargs,
drop=False,
):
ErrorMessage.default_to_pandas("`GroupBy.cov`")
# TODO(https://github.com/modin-project/modin/issues/1322) implement this.
# Right now, using this class's groupby_agg method, even with how="group_wise",
# produces a result with the wrong index, so default to pandas by using the
# super class's groupby_agg method.
return super().groupby_agg(
by=by,
agg_func="cov",
axis=axis,
groupby_kwargs=groupby_kwargs,
agg_args=agg_args,
agg_kwargs=agg_kwargs,
drop=drop,
)
def groupby_rolling(
self,
by,
agg_func,
axis,
groupby_kwargs,
rolling_kwargs,
agg_args,
agg_kwargs,
drop=False,
):
# 'corr' and 'cov' require knowledge about the whole row axis (all columns have
# to be available in the same partitions), this requirement is not being satisfied
# in the current groupby implementation
unsupported_groupby = (
agg_func in ("corr", "cov") or rolling_kwargs.get("on") is not None
)
if isinstance(agg_func, str):
str_func = agg_func
def agg_func(window, *args, **kwargs):
return getattr(window, str_func)(*args, **kwargs)
else:
assert callable(agg_func)
kwargs = {
"by": by,
"agg_func": lambda grp, *args, **kwargs: agg_func(
grp.rolling(**rolling_kwargs), *args, **kwargs
),
"axis": axis,
"groupby_kwargs": groupby_kwargs,
"agg_args": agg_args,
"agg_kwargs": agg_kwargs,
"how": "direct",
"drop": drop,
}
if unsupported_groupby:
return super(PandasQueryCompiler, self).groupby_agg(**kwargs)
try:
return self._groupby_shuffle(**kwargs)
except NotImplementedError as e:
get_logger().info(
f"Can't use range-partitioning groupby implementation because of: {e}"
+ "\nFalling back to a full-axis implementation."
)
return self.groupby_agg(**kwargs)
    def groupby_agg(
        self,
        by,
        agg_func,
        axis,
        groupby_kwargs,
        agg_args,
        agg_kwargs,
        how="axis_wise",
        drop=False,
        series_groupby=False,
    ):
        """
        Group the data by `by` and apply `agg_func` to every group.

        Dispatches between the range-partitioning implementation
        (``_groupby_shuffle``), the TreeReduce implementation
        (``_groupby_dict_reduce``) and the full-axis implementation built below.
        """
        # Defaulting to pandas in case of an empty frame as we can't process it properly.
        # Higher API level won't pass empty data here unless the frame has delayed
        # computations. So we apparently lose some laziness here (due to index access)
        # because of the inability to process empty groupby natively.
        if len(self.columns) == 0 or len(self._modin_frame) == 0:
            return super().groupby_agg(
                by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, how, drop
            )

        # 'group_wise' means 'groupby.apply()'. We're certain that range-partitioning groupby
        # always works better for '.apply()', so we're using it regardless of the 'RangePartitioning'
        # value
        if how == "group_wise" or RangePartitioning.get():
            try:
                return self._groupby_shuffle(
                    by=by,
                    agg_func=agg_func,
                    axis=axis,
                    groupby_kwargs=groupby_kwargs,
                    agg_args=agg_args,
                    agg_kwargs=agg_kwargs,
                    drop=drop,
                    how=how,
                    series_groupby=series_groupby,
                )
            except NotImplementedError as e:
                # if a user wants to use range-partitioning groupby explicitly, then we should print a visible
                # warning to them on a failure, otherwise we're only logging it
                message = (
                    f"Can't use range-partitioning groupby implementation because of: {e}"
                    + "\nFalling back to a full-axis implementation."
                )
                get_logger().info(message)
                if RangePartitioning.get():
                    ErrorMessage.warn(message)

        # Dictionary of TreeReduce-able functions takes the cheaper MapReduce path.
        if isinstance(agg_func, dict) and GroupbyReduceImpl.has_impl_for(agg_func):
            return self._groupby_dict_reduce(
                by, agg_func, axis, groupby_kwargs, agg_args, agg_kwargs, drop
            )

        is_transform_method = how == "transform" or (
            isinstance(agg_func, str) and agg_func in transformation_kernels
        )

        original_agg_func = agg_func

        if isinstance(agg_func, dict):
            assert (
                how == "axis_wise"
            ), f"Only 'axis_wise' aggregation is supported with dictionary functions, got: {how}"
        else:
            agg_method = (
                SeriesGroupByDefault if series_groupby else GroupByDefault
            ).get_aggregation_method(how)

            # Bind the user's function through the pandas-defaulting helper.
            def agg_func(grp, *args, **kwargs):
                return agg_method(grp, original_agg_func, *args, **kwargs)

        # since we're going to modify `groupby_kwargs` dict in a `groupby_agg_builder`,
        # we want to copy it to not propagate these changes into source dict, in case
        # of unsuccessful end of function
        groupby_kwargs = groupby_kwargs.copy()

        as_index = groupby_kwargs.get("as_index", True)
        # Split 'by' into columns living in 'self' (internal) and everything else.
        external_by, internal_by, _ = self._groupby_separate_by(by, drop)
        internal_qc = (
            [self.getitem_column_array(internal_by)] if len(internal_by) else []
        )

        by = internal_qc + external_by

        broadcastable_by = [o._modin_frame for o in by if isinstance(o, type(self))]
        not_broadcastable_by = [o for o in by if not isinstance(o, type(self))]

        def groupby_agg_builder(df, by=None, drop=False, partition_idx=None):
            """
            Compute groupby aggregation for a single partition.

            Parameters
            ----------
            df : pandas.DataFrame
                Partition of the self frame.
            by : pandas.DataFrame, optional
                Broadcasted partition which contains `by` columns.
            drop : bool, default: False
                Indicates whether `by` partition came from the `self` frame.
            partition_idx : int, optional
                Positional partition index along groupby axis.

            Returns
            -------
            pandas.DataFrame
                DataFrame containing the result of groupby aggregation
                for this particular partition.
            """
            # Set `as_index` to True to track the metadata of the grouping object
            # It is used to make sure that between phases we are constructing the
            # right index and placing columns in the correct order.
            groupby_kwargs["as_index"] = True

            # We have to filter func-dict BEFORE inserting broadcasted 'by' columns
            # to avoid multiple aggregation results for 'by' cols in case they're
            # present in the func-dict:
            partition_agg_func = GroupByReduce.get_callable(agg_func, df)

            internal_by_cols = pandas.Index([])
            missed_by_cols = pandas.Index([])

            if by is not None:
                internal_by_df = by[internal_by]

                if isinstance(internal_by_df, pandas.Series):
                    internal_by_df = internal_by_df.to_frame()

                # Insert 'by' columns this partition is missing so grouping works locally.
                missed_by_cols = internal_by_df.columns.difference(df.columns)
                if len(missed_by_cols) > 0:
                    df = pandas.concat(
                        [df, internal_by_df[missed_by_cols]],
                        axis=1,
                        copy=False,
                    )

                internal_by_cols = internal_by_df.columns

                external_by = by.columns.difference(internal_by).unique()
                external_by_df = by[external_by].squeeze(axis=1)

                if isinstance(external_by_df, pandas.DataFrame):
                    external_by_cols = [o for _, o in external_by_df.items()]
                else:
                    external_by_cols = [external_by_df]

                by = internal_by_cols.tolist() + external_by_cols
            else:
                by = []

            by += not_broadcastable_by

            level = groupby_kwargs.get("level", None)
            if level is not None and not by:
                # Pure level-grouping: pandas requires by=None in this case.
                by = None
                by_length = len(level) if is_list_like(level) else 1
            else:
                by_length = len(by)

            def compute_groupby(df, drop=False, partition_idx=0):
                """Compute groupby aggregation for a single partition."""
                target_df = df.squeeze(axis=1) if series_groupby else df
                grouped_df = target_df.groupby(by=by, axis=axis, **groupby_kwargs)
                try:
                    result = partition_agg_func(grouped_df, *agg_args, **agg_kwargs)
                except DataError:
                    # This happens when the partition is filled with non-numeric data and a
                    # numeric operation is done. We need to build the index here to avoid
                    # issues with extracting the index.
                    result = pandas.DataFrame(index=grouped_df.size().index)
                if isinstance(result, pandas.Series):
                    result = result.to_frame(
                        result.name
                        if result.name is not None
                        else MODIN_UNNAMED_SERIES_LABEL
                    )

                selection = agg_func.keys() if isinstance(agg_func, dict) else None
                if selection is None:
                    # Some pandas built-in aggregation functions aggregate 'by' columns
                    # (for example 'apply', 'dtypes', maybe more...). Since we make sure
                    # that all of the 'by' columns are presented in every partition by
                    # inserting the missed ones, we will end up with all of the 'by'
                    # columns being aggregated in every partition. To avoid duplications
                    # in the result we drop all of the 'by' columns that were inserted
                    # in this partition AFTER handling 'as_index' parameter. The order
                    # is important for proper naming-conflicts handling.
                    misaggregated_cols = missed_by_cols.intersection(result.columns)
                else:
                    misaggregated_cols = []

                if not as_index:
                    GroupBy.handle_as_index_for_dataframe(
                        result,
                        internal_by_cols,
                        by_cols_dtypes=df[internal_by_cols].dtypes.values,
                        by_length=by_length,
                        selection=selection,
                        partition_idx=partition_idx,
                        drop=drop,
                        inplace=True,
                        method="transform" if is_transform_method else None,
                    )
                else:
                    # Strip the synthetic unnamed-series labels from the index.
                    new_index_names = tuple(
                        (
                            None
                            if isinstance(name, str)
                            and name.startswith(MODIN_UNNAMED_SERIES_LABEL)
                            else name
                        )
                        for name in result.index.names
                    )
                    result.index.names = new_index_names

                if len(misaggregated_cols) > 0:
                    result.drop(columns=misaggregated_cols, inplace=True)

                return result

            try:
                return compute_groupby(df, drop, partition_idx)
            except (ValueError, KeyError):
                # This will happen with Arrow buffer read-only errors. We don't want to copy
                # all the time, so this will try to fast-path the code first.
                return compute_groupby(df.copy(), drop, partition_idx)

        if isinstance(original_agg_func, dict):
            apply_indices = list(agg_func.keys())
        elif isinstance(original_agg_func, list):
            apply_indices = self.columns.difference(internal_by).tolist()
        else:
            apply_indices = None

        if (
            # For now handling only simple cases, where 'by' columns are described by a single query compiler
            agg_kwargs.get("as_index", True)
            and len(not_broadcastable_by) == 0
            and len(broadcastable_by) == 1
            and broadcastable_by[0].has_materialized_dtypes
        ):
            new_index = ModinIndex(
                # actual value will be assigned on a parent update
                value=None,
                axis=0,
                dtypes=broadcastable_by[0].dtypes,
            )
        else:
            new_index = None

        new_modin_frame = self._modin_frame.broadcast_apply_full_axis(
            axis=axis,
            func=lambda df, by=None, partition_idx=None: groupby_agg_builder(
                df, by, drop, partition_idx
            ),
            other=broadcastable_by,
            new_index=new_index,
            apply_indices=apply_indices,
            enumerate_partitions=True,
        )
        result = self.__constructor__(new_modin_frame)

        # that means that exception in `compute_groupby` was raised
        # in every partition, so we also should raise it
        if (
            len(result.columns) == 0
            and len(self.columns) != 0
            and agg_kwargs.get("numeric_only", False)
        ):
            raise TypeError("No numeric types to aggregate.")

        return result
# END Manual Partitioning methods
    def pivot(self, index, columns, values):
        """
        Produce a pivot without aggregation (pandas ``DataFrame.pivot``).

        Implemented as a full-axis ``set_index`` over the `index` + `columns`
        keys followed by an ``unstack`` of the `columns` levels.
        """
        from pandas.core.reshape.pivot import _convert_by

        def __convert_by(by):
            """Convert passed value to a list."""
            if isinstance(by, pandas.Index):
                by = list(by)
            by = _convert_by(by)
            # A list of scalar keys that aren't all column labels is treated as
            # one compound key, hence the extra nesting.
            if (
                len(by) > 0
                and (not is_list_like(by[0]) or isinstance(by[0], tuple))
                and not all([key in self.columns for key in by])
            ):
                by = [by]
            return by

        index, columns, values = map(__convert_by, [index, columns, values])
        # A single non-tuple list-like 'index' is data to become the index itself,
        # not a column label to index by.
        is_custom_index = (
            len(index) == 1
            and is_list_like(index[0])
            and not isinstance(index[0], tuple)
        )

        if is_custom_index or len(index) == 0:
            to_reindex = columns
        else:
            to_reindex = index + columns

        if len(values) != 0:
            obj = self.getitem_column_array(to_reindex + values)
        else:
            obj = self

        if is_custom_index:
            obj.index = index

        reindexed = self.__constructor__(
            obj._modin_frame.apply_full_axis(
                1,
                lambda df: df.set_index(to_reindex, append=(len(to_reindex) == 1)),
                new_columns=obj.columns.drop(to_reindex),
            )
        )

        unstacked = reindexed.unstack(level=columns, fill_value=None)
        # Drop the artificial extra column level produced by a single-column frame.
        if len(reindexed.columns) == 1 and unstacked.columns.nlevels > 1:
            unstacked.columns = unstacked.columns.droplevel(0)

        return unstacked
    def pivot_table(
        self,
        index,
        values,
        columns,
        aggfunc,
        fill_value,
        margins,
        dropna,
        margins_name,
        observed,
        sort,
    ):
        """
        Create a spreadsheet-style pivot table (pandas ``pivot_table``).

        Tries three implementations in order of decreasing parallelism:
        MapReduce, range-partitioning, and finally full-axis.
        """
        ErrorMessage.mismatch_with_pandas(
            operation="pivot_table",
            message="Order of columns could be different from pandas",
        )

        from pandas.core.reshape.pivot import _convert_by

        def __convert_by(by):
            """Convert passed value to a list."""
            if isinstance(by, pandas.Index):
                return list(by)
            return _convert_by(by)

        is_1d_values = values is not None and not is_list_like(values)
        index, columns = map(__convert_by, [index, columns])

        if len(index) + len(columns) == 0:
            raise ValueError("No group keys passed!")

        # A scalar 'values' with both index and columns collapses one level of
        # the resulting column MultiIndex; a list aggfunc adds one level back.
        if is_1d_values and len(index) > 0 and len(columns) > 0:
            drop_column_level = 1 if isinstance(aggfunc, list) else 0
        else:
            drop_column_level = None

        # if the value is 'None' it will be converted to an empty list (no columns to aggregate),
        # which is invalid for 'values', as 'None' means aggregate ALL columns instead
        if values is not None:
            values = __convert_by(values)

        # using 'pandas.unique' instead of 'numpy' as it guarantees to not change the original order
        unique_keys = pandas.Series(index + columns).unique()

        kwargs = {
            "qc": self,
            "unique_keys": unique_keys,
            "drop_column_level": drop_column_level,
            "pivot_kwargs": {
                "index": index,
                "values": values,
                "columns": columns,
                "aggfunc": aggfunc,
                "fill_value": fill_value,
                "margins": margins,
                "dropna": dropna,
                "margins_name": margins_name,
                "observed": observed,
                "sort": sort,
            },
        }

        try:
            return PivotTableImpl.map_reduce_impl(**kwargs)
        except NotImplementedError as e:
            message = (
                f"Can't use MapReduce 'pivot_table' implementation because of: {e}"
                + "\nFalling back to a range-partitioning implementation."
            )
            get_logger().info(message)

        try:
            return PivotTableImpl.range_partition_impl(**kwargs)
        except NotImplementedError as e:
            message = (
                f"Can't use range-partitioning 'pivot_table' implementation because of: {e}"
                + "\nFalling back to a full-axis implementation."
            )
            get_logger().info(message)

        return PivotTableImpl.full_axis_impl(**kwargs)
# Get_dummies
def get_dummies(self, columns, **kwargs):
# `columns` as None does not mean all columns, by default it means only
# non-numeric columns.
if columns is None:
columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
# If we aren't computing any dummies, there is no need for any
# remote compute.
if len(columns) == 0:
return self.copy()
elif not is_list_like(columns):
columns = [columns]
def map_fn(df): # pragma: no cover
cols_to_encode = df.columns.intersection(columns)
return pandas.get_dummies(df, columns=cols_to_encode, **kwargs)
# In some cases, we are mapping across all of the data. It is more
# efficient if we are mapping over all of the data to do it this way
# than it would be to reuse the code for specific columns.
if len(columns) == len(self.columns):
new_modin_frame = self._modin_frame.apply_full_axis(
0, map_fn, new_index=self.index, dtypes=bool
)
untouched_frame = None
else:
new_modin_frame = self._modin_frame.take_2d_labels_or_positional(
col_labels=columns
).apply_full_axis(0, map_fn, new_index=self.index, dtypes=bool)
untouched_frame = self.drop(columns=columns)
# If we mapped over all the data we are done. If not, we need to
# prepend the `new_modin_frame` with the raw data from the columns that were
# not selected.
if len(columns) != len(self.columns):
new_modin_frame = untouched_frame._modin_frame.concat(
1, [new_modin_frame], how="left", sort=False
)
return self.__constructor__(new_modin_frame)
# END Get_dummies
# Indexing
def take_2d_positional(self, index=None, columns=None):
return self.__constructor__(
self._modin_frame.take_2d_labels_or_positional(
row_positions=index, col_positions=columns
)
)
    def write_items(
        self, row_numeric_index, col_numeric_index, item, need_columns_reindex=True
    ):
        # Write `item` into the 2D positional region given by
        # `row_numeric_index` x `col_numeric_index`, returning a new compiler.

        # We have to keep this import away from the module level to avoid circular import
        from modin.pandas.utils import broadcast_item, is_scalar

        def iloc_mut(partition, row_internal_indices, col_internal_indices, item):
            """
            Write `value` in a specified location in a single partition.

            Parameters
            ----------
            partition : pandas.DataFrame
                Partition of the self frame.
            row_internal_indices : list of ints
                Positional indices of rows in this particular partition
                to write `item` to.
            col_internal_indices : list of ints
                Positional indices of columns in this particular partition
                to write `item` to.
            item : 2D-array
                Value to write.

            Returns
            -------
            pandas.DataFrame
                Partition data with updated values.
            """
            partition = partition.copy()
            try:
                partition.iloc[row_internal_indices, col_internal_indices] = item
            except ValueError:
                # `copy` is needed to avoid "ValueError: buffer source array is read-only" for `item`
                # because the item may be converted to the type that is in the dataframe.
                # TODO: in the future we will need to convert to the correct type manually according
                # to the following warning. Example: "FutureWarning: Setting an item of incompatible
                # dtype is deprecated and will raise in a future error of pandas. Value '[1.38629436]'
                # has dtype incompatible with int64, please explicitly cast to a compatible dtype first."
                partition.iloc[row_internal_indices, col_internal_indices] = item.copy()
            return partition

        if not is_scalar(item):
            # Broadcast a non-scalar item to the target region's shape; the
            # helper may also normalize the positional indices.
            (
                broadcasted_item,
                broadcasted_dtypes,
                row_numeric_index,
                col_numeric_index,
            ) = broadcast_item(
                self,
                row_numeric_index,
                col_numeric_index,
                item,
                need_columns_reindex=need_columns_reindex,
            )
        else:
            broadcasted_item, broadcasted_dtypes = item, pandas.Series(
                [extract_dtype(item)] * len(col_numeric_index)
            )

        new_dtypes = None
        if (
            # compute dtypes only if assigning entire columns
            isinstance(row_numeric_index, slice)
            and row_numeric_index == slice(None)
            and self.frame_has_materialized_dtypes
        ):
            new_dtypes = self.dtypes.copy()
            new_dtypes.iloc[col_numeric_index] = broadcasted_dtypes.values

        new_modin_frame = self._modin_frame.apply_select_indices(
            axis=None,
            func=iloc_mut,
            row_labels=row_numeric_index,
            col_labels=col_numeric_index,
            new_index=self.index,
            new_columns=self.columns,
            new_dtypes=new_dtypes,
            keep_remaining=True,
            item_to_distribute=broadcasted_item,
        )
        return self.__constructor__(new_modin_frame)
def sort_rows_by_column_values(self, columns, ascending=True, **kwargs):
new_modin_frame = self._modin_frame.sort_by(
0, columns, ascending=ascending, **kwargs
)
return self.__constructor__(new_modin_frame)
def sort_columns_by_row_values(self, rows, ascending=True, **kwargs):
if not is_list_like(rows):
rows = [rows]
ErrorMessage.default_to_pandas("sort_values")
broadcast_value_list = [
self.getitem_row_array([row]).to_pandas() for row in rows
]
index_builder = list(zip(broadcast_value_list, rows))
broadcast_values = pandas.concat(
[row for row, idx in index_builder], copy=False
)
broadcast_values.columns = self.columns
new_columns = broadcast_values.sort_values(
by=rows, axis=1, ascending=ascending, **kwargs
).columns
return self.reindex(axis=1, labels=new_columns)
# Cat operations
def cat_codes(self):
def func(df: pandas.DataFrame) -> pandas.DataFrame:
ser = df.iloc[:, 0]
return ser.cat.codes.to_frame(name=MODIN_UNNAMED_SERIES_LABEL)
res = self._modin_frame.map(func=func, new_columns=[MODIN_UNNAMED_SERIES_LABEL])
return self.__constructor__(res, shape_hint="column")
# END Cat operations
def compare(self, other, **kwargs):
return self.__constructor__(
self._modin_frame.broadcast_apply_full_axis(
0,
lambda left, right: pandas.DataFrame.compare(
left, other=right, **kwargs
),
other._modin_frame,
)
)
def case_when(self, caselist):
qc_type = type(self)
caselist = [
tuple(
data._modin_frame if isinstance(data, qc_type) else data
for data in case_tuple
)
for case_tuple in caselist
]
return self.__constructor__(
self._modin_frame.case_when(caselist),
shape_hint=self._shape_hint,
)
| PandasQueryCompiler |
python | jazzband__django-redis | tests/test_hashring.py | {
"start": 61,
"end": 716
} | class ____:
def __init__(self, identifier):
self.identifier = identifier
def __str__(self):
return f"node:{self.identifier}"
def __repr__(self):
return f"<Node {self.identifier}>"
@pytest.fixture
def hash_ring():
    """A three-node ring shared by the tests in this module."""
    nodes = [Node(identifier) for identifier in range(3)]
    return HashRing(nodes)
def test_hashring(hash_ring):
    """The ring must map a fixed set of keys to the expected node identifiers."""
    observed = [hash_ring.get_node(f"test{i}").identifier for i in range(10)]
    assert observed == [0, 2, 1, 2, 2, 2, 2, 0, 1, 1]
def test_hashring_brute_force(hash_ring):
    """Every generated key must resolve to some node (no lookup may fail)."""
    for index in range(10000):
        assert hash_ring.get_node(f"test{index}")
| Node |
python | gwtw__py-sorting | test/base_string_sort_test.py | {
"start": 0,
"end": 1882
} | class ____(object):
def test_sorts_sorted_character_array(self):
self.assertEqual(["a","b","c","d","e"], self.sort(["a","b","c","d","e"]))
def test_sorts_reverse_sorted_character_array(self):
self.assertEqual(["a","b","c","d","e"], self.sort(["e","d","c","b","a"]))
def test_sorts_sorted_character_array_with_two_values_swapped(self):
self.assertEqual(["a","b","c","d","e"], self.sort(["a","c","b","d","e"]))
def test_sorts_jumbled_character_array(self):
self.assertEqual(
["a","b","c","d","e","f","g","h","i","j"],
self.sort(["i","b","a","h","c","g","d","f","e","j"]))
def test_sorts_character_array_with_duplicate_values(self):
self.assertEqual(
["a","a","b","b","c","c","d","d"],
self.sort(["b","c","a","d","c","a","b","d"]))
def test_sorts_sorted_string_array(self):
self.assertEqual(
["aa","bb","cc","dd","ee"],
self.sort(["aa","bb","cc","dd","ee"]))
def test_sorts_reverse_sorted_string_array(self):
self.assertEqual(
["aa","bb","cc","dd","ee"],
self.sort(["ee","dd","cc","bb","aa"]))
def test_sorts_sorted_string_array_with_two_values_swapped(self):
self.assertEqual(
["aa","bb","cc","dd","ee"],
self.sort(["aa","cc","bb","dd","ee"]))
def test_sorts_jumbled_string_array(self):
self.assertEqual(
["aa","bb","cc","dd","ee","ff","gg","hh","ii","jj"],
self.sort(["ii","bb","aa","hh","cc","gg","dd","ff","ee","jj"]))
def test_sorts_string_array_with_duplicate_values(self):
self.assertEqual(
["aa","aa","bb","bb","cc","cc","dd","dd"],
self.sort(["bb","cc","aa","dd","cc","aa","bb","dd"]))
def test_sorts_second_character_in_a_string_array(self):
self.assertEqual(
["aa","ab","ba","bb","ca","cb","da","db"],
self.sort(["bb","ca","ab","da","cb","aa","ba","db"]))
| BaseStringSortTest |
python | kamyu104__LeetCode-Solutions | Python/maximize-score-after-n-operations.py | {
"start": 84,
"end": 950
} | class ____(object):
    def maxScore(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Subset DP over bitmasks (Python 2 code: note 'xrange').
        # dp[mask] = best score achievable using exactly the elements in 'mask';
        # only even-popcount masks are reachable since elements are paired.

        def popcount(n):
            # Kernighan's bit trick: each step clears the lowest set bit.
            count = 0
            while n:
                n &= n-1
                count += 1
            return count

        def bits(mask):
            # Positions of the set bits in 'mask', in increasing order.
            result = []
            i = 0
            while mask:
                if mask&1:
                    result.append(i)
                i += 1
                mask >>= 1
            return result

        dp = [0]*(2**len(nums))
        for mask in xrange(3, len(dp)):
            cnt = popcount(mask)
            if cnt%2:
                continue
            # Pair (i, j) as the (cnt//2)-th operation; its score is the
            # operation index times gcd of the chosen values.
            for i, j in itertools.combinations(bits(mask), 2): # Time: O(n^2)
                dp[mask] = max(dp[mask], cnt//2*gcd(nums[i], nums[j]) + dp[mask^(1<<i)^(1<<j)])
        return dp[-1]
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/llama_dataset/rag.py | {
"start": 1175,
"end": 2685
} | class ____(BaseLlamaDataExample):
"""
RAG example class. Analogous to traditional ML datasets, this dataset contains
the "features" (i.e., query + context) to make a prediction and the "label" (i.e., response)
to evaluate the prediction.
Args:
query (str): The user query
query_by (CreatedBy): Query generated by human or ai (model-name)
reference_contexts (Optional[List[str]]): The contexts used for response
reference_answer ([str]): Reference answer to the query. An answer
that would receive full marks upon evaluation.
reference_answer_by: The reference answer generated by human or ai (model-name).
"""
query: str = Field(
default_factory=str, description="The user query for the example."
)
query_by: Optional[CreatedBy] = Field(
default=None, description="What generated the query."
)
reference_contexts: Optional[List[str]] = Field(
default=None,
description="The contexts used to generate the reference answer.",
)
reference_answer: str = Field(
default_factory=str,
description="The reference (ground-truth) answer to the example.",
)
reference_answer_by: Optional[CreatedBy] = Field(
default=None, description="What generated the reference answer."
)
    @property
    def class_name(self) -> str:
        """Data example class name."""
        # NOTE(review): appears to serve as a stable type tag for this example
        # type -- confirm against the (de)serialization code that consumes it.
        return "LabelledRagDataExample"
| LabelledRagDataExample |
python | Unity-Technologies__ml-agents | ml-agents-trainer-plugin/mlagents_trainer_plugin/dqn/dqn_optimizer.py | {
"start": 1553,
"end": 6732
} | class ____(TorchOptimizer):
    def __init__(self, policy: TorchPolicy, trainer_settings: TrainerSettings):
        """Build the DQN optimizer: Adam over the policy's Q-network, decay schedules, and a target network."""
        super().__init__(policy, trainer_settings)
        # initialize hyper parameters
        params = list(self.policy.actor.parameters())
        self.optimizer = torch.optim.Adam(
            params, lr=self.trainer_settings.hyperparameters.learning_rate
        )
        # One reward stream (and discount factor) per configured reward signal.
        self.stream_names = list(self.reward_signals.keys())
        self.gammas = [_val.gamma for _val in trainer_settings.reward_signals.values()]
        # 1 when episode termination should zero the bootstrap target, 0 otherwise.
        self.use_dones_in_backup = {
            name: int(not self.reward_signals[name].ignore_done)
            for name in self.stream_names
        }
        self.hyperparameters: DQNSettings = cast(
            DQNSettings, trainer_settings.hyperparameters
        )
        self.tau = self.hyperparameters.tau
        # Learning-rate decay runs over the full training horizon (max_steps).
        self.decay_learning_rate = ModelUtils.DecayedValue(
            self.hyperparameters.learning_rate_schedule,
            self.hyperparameters.learning_rate,
            1e-10,
            self.trainer_settings.max_steps,
        )
        # Epsilon-exploration decays over a fixed 20000-step window
        # (hard-coded here rather than tied to max_steps).
        self.decay_exploration_rate = ModelUtils.DecayedValue(
            self.hyperparameters.exploration_schedule,
            self.hyperparameters.exploration_initial_eps,
            self.hyperparameters.exploration_final_eps,
            20000,
        )
        # initialize Target Q_network
        self.q_net_target = QNetwork(
            stream_names=self.reward_signals.keys(),
            observation_specs=policy.behavior_spec.observation_specs,
            network_settings=policy.network_settings,
            action_spec=policy.behavior_spec.action_spec,
        )
        # Hard-copy (tau=1.0) the online weights into the target network at start.
        ModelUtils.soft_update(self.policy.actor, self.q_net_target, 1.0)
        self.q_net_target.to(default_device())
    @property
    def critic(self):
        # Expose the target Q-network as this optimizer's critic.
        # NOTE(review): base-class consumers appear to use this for value
        # estimation -- confirm against TorchOptimizer.
        return self.q_net_target
@timed
def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:
"""
Performs update on model.
:param batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
# Get decayed parameters
decay_lr = self.decay_learning_rate.get_value(self.policy.get_current_step())
exp_rate = self.decay_exploration_rate.get_value(self.policy.get_current_step())
self.policy.actor.exploration_rate = exp_rate
rewards = {}
for name in self.reward_signals:
rewards[name] = ModelUtils.list_to_tensor(
batch[RewardSignalUtil.rewards_key(name)]
)
n_obs = len(self.policy.behavior_spec.observation_specs)
current_obs = ObsUtil.from_buffer(batch, n_obs)
# Convert to tensors
current_obs = [ModelUtils.list_to_tensor(obs) for obs in current_obs]
next_obs = ObsUtil.from_buffer_next(batch, n_obs)
# Convert to tensors
next_obs = [ModelUtils.list_to_tensor(obs) for obs in next_obs]
actions = AgentAction.from_buffer(batch)
dones = ModelUtils.list_to_tensor(batch[BufferKey.DONE])
current_q_values, _ = self.policy.actor.critic_pass(
current_obs, sequence_length=self.policy.sequence_length
)
qloss = []
with torch.no_grad():
greedy_actions = self.policy.actor.get_greedy_action(current_q_values)
next_q_values_list, _ = self.q_net_target.critic_pass(
next_obs, sequence_length=self.policy.sequence_length
)
for name_i, name in enumerate(rewards.keys()):
with torch.no_grad():
next_q_values = torch.gather(
next_q_values_list[name], dim=1, index=greedy_actions
).squeeze()
target_q_values = rewards[name] + (
(1.0 - self.use_dones_in_backup[name] * dones)
* self.gammas[name_i]
* next_q_values
)
target_q_values = target_q_values.reshape(-1, 1)
curr_q = torch.gather(
current_q_values[name], dim=1, index=actions.discrete_tensor
)
qloss.append(torch.nn.functional.smooth_l1_loss(curr_q, target_q_values))
loss = torch.mean(torch.stack(qloss))
ModelUtils.update_learning_rate(self.optimizer, decay_lr)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
ModelUtils.soft_update(self.policy.actor, self.q_net_target, self.tau)
update_stats = {
"Losses/Value Loss": loss.item(),
"Policy/Learning Rate": decay_lr,
"Policy/epsilon": exp_rate,
}
for reward_provider in self.reward_signals.values():
update_stats.update(reward_provider.update(batch))
return update_stats
def get_modules(self):
modules = {
"Optimizer:value_optimizer": self.optimizer,
"Optimizer:critic": self.critic,
}
for reward_provider in self.reward_signals.values():
modules.update(reward_provider.get_modules())
return modules
| DQNOptimizer |
python | bokeh__bokeh | tests/unit/bokeh/models/widgets/test_slider.py | {
"start": 4860,
"end": 8530
} | class ____:
def test_value_and_value_throttled(self) -> None:
start = datetime(2021, 1, 1)
end = datetime(2021, 12, 31)
value = (convert_datetime_type(datetime(2021, 2, 1)), convert_datetime_type(datetime(2021, 2, 28)))
s0 = mws.DateRangeSlider(start=start, end=end)
with pytest.raises(UnsetValueError):
s0.value
with pytest.raises(UnsetValueError):
s0.value_throttled
s1 = mws.DateRangeSlider(start=start, end=end, value=value)
assert s1.value == value
assert s1.value_throttled == value
def test_value_as_datetime_when_set_as_datetime(self) -> None:
start = datetime(2017, 8, 9, 0, 0).astimezone(timezone.utc)
end = datetime(2017, 8, 10, 0, 0).astimezone(timezone.utc)
s = mws.DateRangeSlider(start=start, end=end, value=(start, end))
assert s.value_as_datetime == (start, end)
def test_value_as_datetime_when_set_as_timestamp(self) -> None:
start = datetime(2017, 8, 9, 0, 0).astimezone(timezone.utc)
end = datetime(2017, 8, 10, 0, 0).astimezone(timezone.utc)
s = mws.DateRangeSlider(start=start, end=end,
# Bokeh serializes as ms since epoch, if they get set as numbers (e.g.)
# by client side update, this is the units they will be
value=(convert_datetime_type(start), convert_datetime_type(end)))
assert s.value_as_datetime == (start, end)
def test_value_as_datetime_when_set_mixed(self) -> None:
start = datetime(2017, 8, 9, 0, 0).astimezone(timezone.utc)
end = datetime(2017, 8, 10, 0, 0).astimezone(timezone.utc)
s = mws.DateRangeSlider(start=start, end=end,
value=(start, convert_datetime_type(end)))
assert s.value_as_datetime == (start, end)
s = mws.DateRangeSlider(start=start, end=end,
value=(convert_datetime_type(start), end))
assert s.value_as_datetime == (start, end)
def test_value_as_date_when_set_as_date(self) -> None:
start = date(2017, 8, 9)
end = date(2017, 8, 10)
s = mws.DateRangeSlider(start=start, end=end, value=(start, end))
assert s.value_as_date == (start, end)
def test_value_as_date_when_set_as_timestamp(self) -> None:
start = date(2017, 8, 9)
end = date(2017, 8, 10)
s = mws.DateRangeSlider(start=start, end=end,
# Bokeh serializes as ms since epoch, if they get set as numbers (e.g.)
# by client side update, this is the units they will be
value=(convert_date_to_datetime(start), convert_date_to_datetime(end)))
assert s.value_as_date == (start, end)
def test_value_as_date_when_set_mixed(self) -> None:
start = date(2017, 8, 9)
end = date(2017, 8, 10)
s = mws.DateRangeSlider(start=start, end=end,
value=(start, convert_date_to_datetime(end)))
assert s.value_as_date == (start, end)
s = mws.DateRangeSlider(start=start, end=end,
value=(convert_date_to_datetime(start), end))
assert s.value_as_date == (start, end)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| TestDateRangeSlider |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 82864,
"end": 91545
} | class ____(test_util.TensorFlowTestCase):
def testShapesMatch(self):
"""Tests for various different shape combinations."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((2, 2, 2), (2, 1), 1),)
shapes.append(((2, 2, 2), (2, 2), 1),)
shapes.append(((2, 2, 2), (2, 3), 0),)
shapes.append(((2, 2, 2), (3,), 0),)
shapes.append(((2, 2, 2), (1,), 0),)
shapes.append(((2, 2, 3, 2), (2, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 1), 1),)
shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
with self.subTest(
params_shape=params_shape,
indices_shape=indices_shape,
batch_dims=batch_dims):
params = constant_op.constant(1.0, shape=(params_shape))
indices = constant_op.constant(
1, shape=(indices_shape), dtype=dtypes.int32)
out = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
ndims_params = len(params_shape) - batch_dims
ndims_rows = ndims_params - indices_shape[-1]
expected_out_shape = indices_shape[:-1]
if ndims_rows > 0:
expected_out_shape += params_shape[-ndims_rows:]
self.assertSequenceEqual(out.shape, expected_out_shape)
def testReducesToGatherNDWhenBatchDimIsZero(self):
"""Confirms setting batch_dims to zero reduces to tf.gather_nd."""
params = constant_op.constant(np.random.uniform(0.0, 1.0, size=(7, 8, 9)))
indices_shapes = []
indices_shapes.append((1,))
indices_shapes.append((3, 1))
indices_shapes.append((3, 3, 1))
indices_shapes.append((2,))
indices_shapes.append((3, 2))
indices_shapes.append((3, 3, 2))
indices_shapes.append((3,))
indices_shapes.append((3, 3))
indices_shapes.append((3, 3, 3))
for indices_shape in indices_shapes:
with self.subTest(indices_shape=indices_shape):
indices = np.random.randint(0, 7, size=indices_shape)
gather_nd_result = gen_array_ops.gather_nd(params, indices)
batch_gather_nd_result = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=0)
self.assertAllEqual(gather_nd_result, batch_gather_nd_result)
def testSameResultAsMapFn(self):
"""Compares results with gather_nd called on every element with map_fn."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((2, 2, 2), (2, 1), 1),)
shapes.append(((2, 2, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 1), 1),)
shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
with self.subTest(
params_shape=params_shape,
indices_shape=indices_shape,
batch_dims=batch_dims):
params = constant_op.constant(
np.random.uniform(0.0, 1.0, size=(params_shape)))
indices = np.random.randint(0, 2, size=indices_shape)
batch_gather_nd_result = array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
if batch_dims > 1:
params = array_ops.reshape(
params, shape=[-1] + list(params_shape[batch_dims:]))
indices = array_ops.reshape(
indices, shape=[-1] + list(indices_shape[batch_dims:]))
map_fn_gather_nd_result = map_fn.map_fn(
fn=self._map_fn_body, elems=(params, indices), dtype=dtypes.float64)
if batch_dims > 1:
out_shape = map_fn_gather_nd_result.shape.as_list()
out_shape = list(params_shape[:batch_dims]) + out_shape[1:]
map_fn_gather_nd_result = array_ops.reshape(
map_fn_gather_nd_result, shape=out_shape)
self.assertAllEqual(map_fn_gather_nd_result, batch_gather_nd_result)
def _map_fn_body(self, elems):
return gen_array_ops.gather_nd(elems[0], elems[1])
def testBatchDimsAsTensor(self):
"""Tests Tensor batch_dims as input works as intended."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 0),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
with self.subTest(
params_shape=params_shape,
indices_shape=indices_shape,
batch_dims=batch_dims):
params = constant_op.constant(
np.random.uniform(0.0, 1.0, size=(params_shape)))
indices = np.random.randint(0, 2, size=indices_shape)
batch_gather_nd_result = array_ops.gather_nd(
params=params, indices=indices, batch_dims=batch_dims)
batch_dims_tensor = constant_op.constant([batch_dims])
batch_gather_nd_tensor_batch_dims_result = array_ops.gather_nd(
params=params, indices=indices, batch_dims=batch_dims_tensor)
self.assertAllEqual(batch_gather_nd_tensor_batch_dims_result,
batch_gather_nd_result)
def testInvalidBatchDimsRaisesException(self):
"""Tests whether invalid batch_dims raise expected exceptions."""
params = constant_op.constant(
np.random.uniform(0.0, 1.0, size=(3, 2, 2, 3, 4)))
indices = np.random.randint(0, 2, size=(3, 2, 3))
with self.assertRaises(TypeError):
array_ops.batch_gather_nd(
params=params,
indices=indices,
batch_dims=constant_op.constant((0, 1)))
with self.assertRaises(ValueError):
array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=-1)
with self.assertRaises(ValueError):
array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=4)
def testNoneBatchDimensions(self):
"""Tests gather_nd works with None dimensions."""
shapes = []
# params_shape, indices_shape, batch_dims
shapes.append(((2, 2, 2), (2, 1), 1),)
shapes.append(((2, 2, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 1), 1),)
shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
for params_shape, indices_shape, batch_dims in shapes:
params_ph_shape = list(params_shape)
indices_ph_shape = list(indices_shape)
for i in range(batch_dims):
params_ph_shape[i] = None
indices_ph_shape[i] = None
@def_function.function
def func(params, indices):
return array_ops.batch_gather_nd(
params=params, indices=indices, batch_dims=batch_dims) # pylint: disable=cell-var-from-loop
f = func.get_concrete_function(
tensor_lib.TensorSpec(params_ph_shape, dtypes.float32),
tensor_lib.TensorSpec(indices_ph_shape, dtypes.int32))
params_val = np.ones(dtype=np.float32, shape=params_shape)
indices_val = np.ones(dtype=np.int32, shape=indices_shape)
res = f(params_val, indices_val)
row_ndims = len(params_shape) - batch_dims - indices_shape[-1]
expected_out_shape = indices_shape[:-1]
if row_ndims > 0:
expected_out_shape += params_shape[-row_ndims:]
self.assertSequenceEqual(res.shape, expected_out_shape)
@test_util.run_all_in_graph_and_eager_modes
| BatchGatherNdTest |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/players_async.py | {
"start": 829,
"end": 1382
} | class ____(AsyncPlayer, metaclass=abc.ABCMeta):
def __init__(self, mark: Mark, delay_seconds: float = 0.25) -> None:
super().__init__(mark)
self.delay_seconds = delay_seconds
async def get_move(self, game_state: GameState) -> Move | None:
await asyncio.sleep(self.delay_seconds)
return await self.get_computer_move(game_state)
@abc.abstractmethod
async def get_computer_move(self, game_state: GameState) -> Move | None:
"""Return the computer's move in the given game state."""
| AsyncComputerPlayer |
python | coleifer__peewee | playhouse/sqlcipher_ext.py | {
"start": 3556,
"end": 3632
} | class ____(_SqlCipherDatabase, SqliteExtDatabase):
pass
| SqlCipherExtDatabase |
python | doocs__leetcode | solution/3000-3099/3042.Count Prefix and Suffix Pairs I/Solution.py | {
"start": 0,
"end": 246
} | class ____:
def countPrefixSuffixPairs(self, words: List[str]) -> int:
ans = 0
for i, s in enumerate(words):
for t in words[i + 1 :]:
ans += t.endswith(s) and t.startswith(s)
return ans
| Solution |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 733,
"end": 974
} | class ____(StrEnum):
"""
SentryAppIdentifier is an enum that represents the identifier for a Sentry app.
"""
SENTRY_APP_INSTALLATION_UUID = "sentry_app_installation_uuid"
SENTRY_APP_ID = "sentry_app_id"
| SentryAppIdentifier |
python | dask__dask | dask/tests/test_tokenize.py | {
"start": 26487,
"end": 26556
} | class ____:
a: int = dataclasses.field(init=False)
| NoValueDataClass |
python | run-llama__llama_index | llama-index-integrations/callbacks/llama-index-callbacks-agentops/llama_index/callbacks/agentops/base.py | {
"start": 927,
"end": 3074
} | class ____(BaseModel):
class Config:
arbitrary_types_allowed = True
is_agent_chat_span: Dict[str, bool] = Field(
default_factory=dict,
description="Dictionary to check whether a span originates from an agent.",
)
agent_chat_start_event: Dict[str, LLMChatStartEvent] = Field(
default_factory=dict,
description="Dictionary to hold a start event emitted by an agent.",
)
span_parent: Dict[str, Optional[str]] = Field(
default_factory=dict,
description="Dictionary to get parent span_id of a given span.",
)
span_exception: Dict[str, Set[Exception]] = Field(
default_factory=dict,
description="Dictionary to hold exceptions thrown in a span and its immediate children.",
)
def remove_span_id(self, span_id: str) -> None:
"""Remove a given span_id from all state fields."""
self.is_agent_chat_span.pop(span_id, None)
self.agent_chat_start_event.pop(span_id, None)
self.span_parent.pop(span_id, None)
self.span_exception.pop(span_id, None)
def check_is_agent_chat_span(self, span_id: Optional[str]) -> bool:
"""
Starting with a given span_id, navigate all ancestor spans to determine
whether an AgentRunStepStartEvent is associated with at least one ancestor.
"""
if not span_id:
return False
elif span_id in self.is_agent_chat_span and self.is_agent_chat_span[span_id]:
return True
else:
return self.check_is_agent_chat_span(self.span_parent.get(span_id, None))
def get_chat_start_event(
self, span_id: Optional[str]
) -> Optional[LLMChatStartEvent]:
"""
Starting with a given span_id, find the first ancestor span with an
associated LLMChatStartEvent, then return this event.
"""
if not span_id:
return None
elif span_id in self.agent_chat_start_event:
return self.agent_chat_start_event[span_id]
else:
return self.get_chat_start_event(self.span_parent.get(span_id, None))
| AgentOpsHandlerState |
python | pennersr__django-allauth | allauth/socialaccount/providers/notion/provider.py | {
"start": 267,
"end": 850
} | class ____(ProviderAccount):
def get_user(self):
return self.account.extra_data["owner"]["user"]
def get_name(self):
return self.get_user()["name"]
def get_avatar_url(self):
return self.get_user()["avatar_url"]
def get_workspace_name(self):
return self.account.extra_data["workspace_name"]
def get_workspace_icon(self):
return self.account.extra_data["workspace_icon"]
def to_str(self):
name = self.get_name()
workspace = self.get_workspace_name()
return f"{name} ({workspace})"
| NotionAccount |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.