language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
bottlepy__bottle
|
test/test_exc.py
|
{
"start": 87,
"end": 1135
}
|
class ____(ServerTestBase):
def test_no_exc(self):
@bottle.route('/')
def test(): return 'test'
self.assertBody('test', '/')
def test_memory_error(self):
@bottle.route('/')
def test(): raise MemoryError
with self.assertRaises(MemoryError):
self.urlopen("/")
def test_system_Exit(self):
@bottle.route('/')
def test(): raise SystemExit
with self.assertRaises(SystemExit):
self.urlopen("/")
def test_other_error(self):
@bottle.route('/')
def test(): raise SomeError
self.assertStatus(500, '/')
self.assertInBody('SomeError')
def test_noncatched_error(self):
@bottle.route('/')
def test(): raise SomeError
bottle.request.environ['exc_info'] = None
self.app.catchall = False
with self.assertRaises(SomeError):
self.urlopen("/")
self.app.catchall = True
self.assertStatus(500, '/')
self.assertInBody('SomeError')
|
TestAppException
|
python
|
getsentry__sentry
|
src/sentry/users/services/user/model.py
|
{
"start": 4607,
"end": 4807
}
|
class ____(TypedDict, total=False):
avatar_url: str
avatar_type: int
actor_id: int # TODO(hybrid-cloud): Remove this after the actor migration is complete
is_active: bool
|
UserUpdateArgs
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_ai_conversations.py
|
{
"start": 1770,
"end": 11631
}
|
class ____(OrganizationEventsV2EndpointBase):
"""Endpoint for fetching AI agent conversation traces."""
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.DATA_BROWSING
def get(self, request: Request, organization: Organization) -> Response:
"""
Retrieve AI conversation traces for an organization.
"""
if not features.has("organizations:gen-ai-conversations", organization, actor=request.user):
return Response(status=404)
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response(status=404)
serializer = OrganizationAIConversationsSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
validated_data = serializer.validated_data
# Create paginator with data function
def data_fn(offset: int, limit: int):
return self._get_conversations(
snuba_params=snuba_params,
offset=offset,
limit=limit,
_sort=validated_data.get("sort", "-timestamp"),
_query=validated_data.get("query", ""),
)
with handle_query_errors():
return self.paginate(
request=request,
paginator=GenericOffsetPaginator(data_fn=data_fn),
on_results=lambda results: results,
default_per_page=10,
max_per_page=100,
)
def _get_conversations(
self, snuba_params, offset: int, limit: int, _sort: str, _query: str
) -> list[dict]:
"""
Fetch conversation data by querying spans grouped by gen_ai.conversation.id.
This is a two-step process:
1. Find conversation IDs that have spans in the time range (with pagination/sorting)
2. Get complete aggregations for those conversations (all spans, ignoring time filter)
Args:
snuba_params: Snuba parameters including projects, time range, etc.
offset: Starting index for pagination
limit: Number of results to return
_sort: Sort field and direction (currently only supports timestamp sorting, unused for now)
_query: Search query (not yet implemented)
"""
# Step 1: Find conversation IDs with spans in the time range
conversation_ids_results = Spans.run_table_query(
params=snuba_params,
query_string="has:gen_ai.conversation.id",
selected_columns=[
"gen_ai.conversation.id",
"max(precise.finish_ts)",
],
orderby=["-max(precise.finish_ts)"],
offset=offset,
limit=limit,
referrer=Referrer.API_AI_CONVERSATIONS.value,
config=SearchResolverConfig(auto_fields=True),
sampling_mode="NORMAL",
)
logger.info(
"[ai-conversations] Got Conversation IDs results",
extra={"conversation_ids_results": conversation_ids_results},
)
conversation_ids: list[str] = [
conv_id
for row in conversation_ids_results.get("data", [])
if (conv_id := row.get("gen_ai.conversation.id"))
]
if not conversation_ids:
return []
# Step 2 & 3: Run aggregation and enrichment queries in parallel
with ThreadPoolExecutor(max_workers=2) as executor:
future_aggregations = executor.submit(
self._get_aggregations, snuba_params, conversation_ids
)
future_enrichment = executor.submit(
self._get_enrichment_data, snuba_params, conversation_ids
)
results = future_aggregations.result()
enrichment_data = future_enrichment.result()
# Create a map of conversation data by ID
conversations_map = {}
for row in results.get("data", []):
start_ts = row.get("min(precise.start_ts)", 0)
finish_ts = row.get("max(precise.finish_ts)", 0)
duration_ms = int((finish_ts - start_ts) * 1000) if finish_ts and start_ts else 0
timestamp_ms = int(finish_ts * 1000) if finish_ts else 0
conv_id = row.get("gen_ai.conversation.id", "")
conversations_map[conv_id] = {
"conversationId": conv_id,
"flow": [],
"duration": duration_ms,
"errors": int(row.get("failure_count()") or 0),
"llmCalls": int(row.get("count_if(gen_ai.operation.type,equals,ai_client)") or 0),
"toolCalls": int(row.get("count_if(span.op,equals,gen_ai.execute_tool)") or 0),
"totalTokens": int(row.get("sum(gen_ai.usage.total_tokens)") or 0),
"totalCost": float(row.get("sum(gen_ai.usage.total_cost)") or 0),
"timestamp": timestamp_ms,
"traceCount": 0, # Will be set in _apply_enrichment
"traceIds": [],
}
logger.info(
"[ai-conversations] Got conversations map",
extra={"conversations_map": json.dumps(conversations_map)},
)
# Preserve the order from step 1
conversations = [
conversations_map[conv_id]
for conv_id in conversation_ids
if conv_id in conversations_map
]
if conversations:
self._apply_enrichment(conversations, enrichment_data)
return conversations
def _get_aggregations(self, snuba_params, conversation_ids: list[str]) -> dict[str, Any]:
"""
Get aggregated metrics for conversations (query 2).
"""
logger.info(
"[ai-conversations] Getting complete aggregations for conversations",
extra={"conversation_ids": conversation_ids},
)
results = Spans.run_table_query(
params=snuba_params,
query_string=f"gen_ai.conversation.id:[{','.join(conversation_ids)}]",
selected_columns=[
"gen_ai.conversation.id",
"failure_count()",
"count_if(gen_ai.operation.type,equals,ai_client)",
"count_if(span.op,equals,gen_ai.execute_tool)",
"sum(gen_ai.usage.total_tokens)",
"sum(gen_ai.usage.total_cost)",
"min(precise.start_ts)",
"max(precise.finish_ts)",
],
orderby=None,
offset=0,
limit=len(conversation_ids),
referrer=Referrer.API_AI_CONVERSATIONS_COMPLETE.value,
config=SearchResolverConfig(auto_fields=True),
sampling_mode="HIGHEST_ACCURACY",
)
logger.info(
"[ai-conversations] Got complete aggregations for conversations",
extra={"results": json.dumps(results)},
)
return cast(dict[str, Any], results)
def _get_enrichment_data(self, snuba_params, conversation_ids: list[str]) -> dict[str, Any]:
"""
Get enrichment data (flows and trace IDs) for conversations (query 3).
"""
logger.info(
"[ai-conversations] Enriching conversations",
extra={"conversation_ids": conversation_ids},
)
all_spans_results = Spans.run_table_query(
params=snuba_params,
query_string=f"gen_ai.conversation.id:[{','.join(conversation_ids)}]",
selected_columns=[
"gen_ai.conversation.id",
"span.op",
"gen_ai.agent.name",
"trace",
"precise.start_ts",
],
orderby=["precise.start_ts"],
offset=0,
limit=10000,
referrer=Referrer.API_AI_CONVERSATIONS_ENRICHMENT.value,
config=SearchResolverConfig(auto_fields=True),
sampling_mode="HIGHEST_ACCURACY",
)
logger.info(
"[ai-conversations] Got all spans results",
extra={"all_spans_results": json.dumps(all_spans_results)},
)
return cast(dict[str, Any], all_spans_results)
def _apply_enrichment(self, conversations: list[dict], enrichment_data: dict) -> None:
"""
Apply enrichment data (flows and trace IDs) to conversations.
"""
flows_by_conversation = defaultdict(list)
traces_by_conversation = defaultdict(set)
logger.info(
"[ai-conversations] Collecting traces and flows",
extra={"enrichment_data": json.dumps(enrichment_data)},
)
for row in enrichment_data.get("data", []):
conv_id = row.get("gen_ai.conversation.id", "")
if not conv_id:
continue
# Collect trace IDs
trace_id = row.get("trace", "")
if trace_id:
traces_by_conversation[conv_id].add(trace_id)
# Collect agent flow (only from invoke_agent spans)
if row.get("span.op") == "gen_ai.invoke_agent":
agent_name = row.get("gen_ai.agent.name", "")
if agent_name:
flows_by_conversation[conv_id].append(agent_name)
for conversation in conversations:
conv_id = conversation["conversationId"]
traces = traces_by_conversation.get(conv_id, set())
conversation["flow"] = flows_by_conversation.get(conv_id, [])
conversation["traceIds"] = list(traces)
conversation["traceCount"] = len(traces)
logger.info(
"[ai-conversations] Enriched conversations",
extra={"conversations": json.dumps(conversations)},
)
|
OrganizationAIConversationsEndpoint
|
python
|
encode__django-rest-framework
|
tests/test_response.py
|
{
"start": 9267,
"end": 10766
}
|
class ____(TestCase):
"""
Covers #807
"""
def test_does_not_append_charset_by_default(self):
"""
Renderers don't include a charset unless set explicitly.
"""
headers = {"HTTP_ACCEPT": RendererA.media_type}
resp = self.client.get('/', **headers)
expected = "{}; charset={}".format(RendererA.media_type, 'utf-8')
self.assertEqual(expected, resp['Content-Type'])
def test_if_there_is_charset_specified_on_renderer_it_gets_appended(self):
"""
If renderer class has charset attribute declared, it gets appended
to Response's Content-Type
"""
headers = {"HTTP_ACCEPT": RendererC.media_type}
resp = self.client.get('/', **headers)
expected = f"{RendererC.media_type}; charset={RendererC.charset}"
self.assertEqual(expected, resp['Content-Type'])
def test_content_type_set_explicitly_on_response(self):
"""
The content type may be set explicitly on the response.
"""
headers = {"HTTP_ACCEPT": RendererC.media_type}
resp = self.client.get('/setbyview', **headers)
self.assertEqual('setbyview', resp['Content-Type'])
def test_form_has_label_and_help_text(self):
resp = self.client.get('/html_new_model')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
# self.assertContains(resp, 'Text comes here')
# self.assertContains(resp, 'Text description.')
|
Issue807Tests
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_daily_summary.py
|
{
"start": 1453,
"end": 44186
}
|
class ____(
OutcomesSnubaTest, SnubaTestCase, PerformanceIssueTestCase, SlackActivityNotificationTest
):
def store_event_and_outcomes(
self,
project_id,
timestamp,
fingerprint,
category,
release=None,
resolve=True,
level="error",
):
if category == DataCategory.ERROR:
data = {
"timestamp": timestamp.isoformat(),
"fingerprint": [fingerprint],
"level": level,
"exception": {
"values": [
{
"type": "IntegrationError",
"value": "Identity not found.",
}
]
},
}
if release:
data["release"] = release
event = self.store_event(
data=data,
project_id=project_id,
assert_no_errors=False,
default_event_type=EventType.DEFAULT,
)
elif category == DataCategory.TRANSACTION:
event = self.create_performance_issue()
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": project_id,
"outcome": Outcome.ACCEPTED,
"category": category,
"timestamp": timestamp,
"key_id": 1,
},
num_times=1,
)
group = event.group
if resolve:
group.status = GroupStatus.RESOLVED
group.substatus = None
group.resolved_at = timestamp + timedelta(minutes=1)
group.save()
return group
def setUp(self) -> None:
responses.add_passthru(settings.SENTRY_SNUBA)
super().setUp()
self.now = datetime.now(UTC)
self.two_hours_ago = self.now - timedelta(hours=2)
self.two_days_ago = self.now - timedelta(days=2)
self.three_days_ago = self.now - timedelta(days=3)
self.project.first_event = self.three_days_ago
self.project.save()
self.project2 = self.create_project(
name="foo", organization=self.organization, teams=[self.team]
)
self.project2.first_event = self.three_days_ago
user_option_service.set_option(user_id=self.user.id, key="timezone", value="Etc/GMT+8")
self.release = self.create_release(project=self.project, date_added=self.now)
def populate_event_data(
self, use_release=True, performance_issues=True, regressed_issue=True, escalated_issue=True
):
for _ in range(6):
self.group1 = self.store_event_and_outcomes(
self.project.id,
self.three_days_ago,
fingerprint="group-1",
category=DataCategory.ERROR,
resolve=False,
)
for _ in range(4):
self.store_event_and_outcomes(
self.project.id,
self.two_days_ago,
fingerprint="group-1",
category=DataCategory.ERROR,
resolve=False,
)
for _ in range(3):
self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
resolve=False,
)
# create an issue first seen in the release and set it to regressed
for _ in range(2):
self.group2 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-2",
category=DataCategory.ERROR,
release=self.release.version if use_release else None,
resolve=False,
)
if regressed_issue:
self.group2.substatus = GroupSubStatus.REGRESSED
self.group2.save()
Activity.objects.create_group_activity(
self.group2,
ActivityType.SET_REGRESSION,
data={
"event_id": self.group2.get_latest_event().event_id,
"version": self.release.version,
},
)
# create an issue and set it to escalating
for _ in range(10):
self.group3 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-3",
category=DataCategory.ERROR,
release=self.release.version if use_release else None,
resolve=False,
)
if escalated_issue:
self.group3.substatus = GroupSubStatus.ESCALATING
self.group3.save()
Activity.objects.create_group_activity(
self.group3,
ActivityType.SET_ESCALATING,
data={
"event_id": self.group3.get_latest_event().event_id,
"version": self.release.version,
},
)
# store an event in another project to be sure they're in separate buckets
for _ in range(2):
self.group4 = self.store_event_and_outcomes(
self.project2.id,
self.now,
fingerprint="group-4",
category=DataCategory.ERROR,
resolve=False,
)
if performance_issues:
# store some performance issues
self.perf_event = self.create_performance_issue(
fingerprint=f"{PerformanceNPlusOneGroupType.type_id}-group5"
)
self.perf_event2 = self.create_performance_issue(
fingerprint=f"{PerformanceNPlusOneGroupType.type_id}-group6"
)
assert self.perf_event.group is not None
assert self.perf_event2.group is not None
@with_feature("organizations:daily-summary")
@mock.patch("sentry.tasks.summaries.daily_summary.prepare_summary_data")
def test_schedule_organizations(self, mock_prepare_summary_data: mock.MagicMock) -> None:
user2 = self.create_user()
self.create_member(teams=[self.team], user=user2, organization=self.organization)
with self.tasks():
schedule_organizations(timestamp=self.now.timestamp())
# user2's local timezone is UTC and therefore it isn't sent now
assert mock_prepare_summary_data.delay.call_count == 1
for call_args in mock_prepare_summary_data.delay.call_args_list:
assert call_args.args == (
self.now.timestamp(),
ONE_DAY,
self.organization.id,
[self.user.id],
)
@with_feature("organizations:daily-summary")
@mock.patch("sentry.tasks.summaries.daily_summary.prepare_summary_data")
def test_schedule_organizations_timing(self, mock_prepare_summary_data: mock.MagicMock) -> None:
with self.tasks(), freeze_time("2024-03-06 23:15:00"): # 3:15PM PST
schedule_organizations()
assert mock_prepare_summary_data.delay.call_count == 0
with self.tasks(), freeze_time("2024-03-07 00:00:00"): # 4PM PST
schedule_organizations()
assert mock_prepare_summary_data.delay.call_count == 1
with self.tasks(), freeze_time("2024-03-07 01:00:00"): # 5PM PST
schedule_organizations()
assert (
mock_prepare_summary_data.delay.call_count == 1
) # note this didn't fire again, it just didn't increase from before
@pytest.mark.skip(reason="test is failing, but relevant feature is disabled")
def test_build_summary_data(self) -> None:
self.populate_event_data()
# add another release to make sure new issues in multiple releases show up
release2 = self.create_release(project=self.project, date_added=self.now)
for _ in range(2):
release2_group = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-12",
category=DataCategory.ERROR,
release=release2.version,
resolve=False,
)
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert project_context_map.total_today == 17 # total outcomes from today
assert project_context_map.comparison_period_avg == 1
assert len(project_context_map.key_errors_by_group) == 3
assert (self.group1, 3) in project_context_map.key_errors_by_group
assert (self.group2, 2) in project_context_map.key_errors_by_group
assert (self.group3, 10) in project_context_map.key_errors_by_group
assert len(project_context_map.key_performance_issues) == 2
assert (self.perf_event.group, 1) in project_context_map.key_performance_issues
assert (self.perf_event2.group, 1) in project_context_map.key_performance_issues
assert project_context_map.escalated_today == [self.group3]
assert project_context_map.regressed_today == [self.group2]
assert len(project_context_map.new_in_release) == 2
assert self.group2 in project_context_map.new_in_release[self.release.id]
assert self.group3 in project_context_map.new_in_release[self.release.id]
assert release2_group in project_context_map.new_in_release[release2.id]
project_id2 = self.project2.id
project_context_map2 = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id2]
)
assert project_context_map2.total_today == 2
assert project_context_map2.comparison_period_avg == 0
assert project_context_map2.key_errors_by_group == [(self.group4, 2)]
assert project_context_map2.key_performance_issues == []
assert project_context_map2.escalated_today == []
assert project_context_map2.regressed_today == []
assert project_context_map2.new_in_release == {}
@pytest.mark.skip(reason="flaky and part of a dead project")
def test_build_summary_data_filter_to_unresolved(self) -> None:
for _ in range(3):
group1 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
resolve=False,
)
for _ in range(3):
group2 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-2",
category=DataCategory.ERROR,
resolve=False,
)
for _ in range(3):
self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-3",
category=DataCategory.ERROR,
resolve=True,
)
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert project_context_map.total_today == 9 # total outcomes from today
assert project_context_map.comparison_period_avg == 0
assert len(project_context_map.key_errors_by_group) == 2
assert (group1, 3) in project_context_map.key_errors_by_group
assert (group2, 3) in project_context_map.key_errors_by_group
@pytest.mark.skip(reason="flaky and part of a dead project")
def test_build_summary_data_filter_to_error_level(self) -> None:
"""Test that non-error level issues are filtered out of the results"""
for _ in range(3):
group1 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
resolve=False,
level="info",
)
group2 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-2",
category=DataCategory.ERROR,
resolve=False,
)
group3 = self.store_event_and_outcomes(
self.project.id,
self.now,
fingerprint="group-3",
category=DataCategory.ERROR,
resolve=False,
)
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert project_context_map.total_today == 9 # total outcomes from today
assert project_context_map.comparison_period_avg == 0
assert len(project_context_map.key_errors_by_group) == 2
assert (group1, 3) not in project_context_map.key_errors_by_group
assert (group2, 3) in project_context_map.key_errors_by_group
assert (group3, 3) in project_context_map.key_errors_by_group
def test_build_summary_data_dedupes_groups(self) -> None:
"""
Test that if a group has multiple escalated and/or regressed activity rows, we only use the group once
"""
self.populate_event_data()
self.group2.status = GroupStatus.UNRESOLVED
self.group2.substatus = GroupSubStatus.REGRESSED
self.group2.save()
Activity.objects.create_group_activity(
self.group2,
ActivityType.SET_REGRESSION,
data={
"event_id": self.group2.get_latest_event().event_id,
"version": self.release.version,
},
)
Activity.objects.create_group_activity(
self.group3,
ActivityType.SET_ESCALATING,
data={
"event_id": self.group3.get_latest_event().event_id,
"version": self.release.version,
},
)
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert project_context_map.escalated_today == [self.group3]
assert project_context_map.regressed_today == [self.group2]
def test_build_summary_data_group_regressed_and_escalated(self) -> None:
"""
Test that if a group has regressed and then escalated in the same day, we only list it once as escalating
"""
self.populate_event_data()
Activity.objects.create_group_activity(
self.group2,
ActivityType.SET_ESCALATING,
data={
"event_id": self.group2.get_latest_event().event_id,
"version": self.release.version,
},
)
self.group2.substatus = GroupSubStatus.ESCALATING
self.group2.save()
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert project_context_map.escalated_today == [self.group3, self.group2]
assert project_context_map.regressed_today == []
def test_build_summary_data_group_regressed_twice_and_escalated(self) -> None:
"""
Test that if a group has regressed, been resolved, regresssed again and then escalated in the same day, we only list it once as escalating
"""
self.populate_event_data()
self.group2.status = GroupStatus.RESOLVED
self.group2.substatus = None
self.group2.resolved_at = self.now + timedelta(minutes=1)
self.group2.save()
Activity.objects.create_group_activity(
self.group2,
ActivityType.SET_REGRESSION,
data={
"event_id": self.group2.get_latest_event().event_id,
"version": self.release.version,
},
)
self.group2.status = GroupStatus.UNRESOLVED
self.group2.substatus = GroupSubStatus.REGRESSED
self.group2.save()
Activity.objects.create_group_activity(
self.group2,
ActivityType.SET_ESCALATING,
data={
"event_id": self.group2.get_latest_event().event_id,
"version": self.release.version,
},
)
self.group2.substatus = GroupSubStatus.ESCALATING
self.group2.save()
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert project_context_map.escalated_today == [self.group3, self.group2]
assert project_context_map.regressed_today == []
def test_build_summary_data_group_regressed_escalated_in_the_past(self) -> None:
"""
Test that if a group has regressed or escalated some time in the past over 24 hours ago, it does not show up.
"""
for _ in range(2):
regressed_past_group = self.store_event_and_outcomes(
self.project.id,
self.three_days_ago,
fingerprint="group-12",
category=DataCategory.ERROR,
resolve=False,
)
for _ in range(2):
escalated_past_group = self.store_event_and_outcomes(
self.project.id,
self.three_days_ago,
fingerprint="group-13",
category=DataCategory.ERROR,
resolve=False,
)
with freeze_time(self.two_days_ago):
Activity.objects.create_group_activity(
regressed_past_group,
ActivityType.SET_REGRESSION,
data={
"event_id": regressed_past_group.get_latest_event().event_id,
},
)
Activity.objects.create_group_activity(
escalated_past_group,
ActivityType.SET_ESCALATING,
data={
"event_id": escalated_past_group.get_latest_event().event_id,
},
)
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
project_id = self.project.id
project_context_map = cast(
DailySummaryProjectContext, summary.projects_context_map[project_id]
)
assert regressed_past_group not in project_context_map.regressed_today
assert escalated_past_group not in project_context_map.escalated_today
@mock.patch("sentry.tasks.summaries.daily_summary.deliver_summary")
def test_prepare_summary_data(self, mock_deliver_summary: mock.MagicMock) -> None:
"""Test that if the summary has data in it, we pass it along to be sent"""
self.populate_event_data()
with self.tasks():
prepare_summary_data(
self.now.timestamp(), ONE_DAY, self.organization.id, [self.user.id]
)
assert mock_deliver_summary.call_count == 1
@mock.patch("sentry.tasks.summaries.daily_summary.deliver_summary")
def test_no_data_summary_doesnt_send(self, mock_deliver_summary: mock.MagicMock) -> None:
"""Test that if the summary has no data in it, we don't even try to send it"""
with self.tasks():
prepare_summary_data(
self.now.timestamp(), ONE_DAY, self.organization.id, [self.user.id]
)
assert mock_deliver_summary.call_count == 0
@mock.patch("sentry.notifications.notifications.base.BaseNotification.send")
def test_deliver_summary(self, mock_send: mock.MagicMock) -> None:
self.populate_event_data()
summary = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
with self.tasks():
deliver_summary(summary, [self.user.id])
assert mock_send.call_count == 1
def test_build_top_projects_map(self) -> None:
self.populate_event_data()
project3 = self.create_project(
name="barf", organization=self.organization, teams=[self.team]
)
project3.first_event = self.three_days_ago
for _ in range(15):
self.store_event_and_outcomes(
project3.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
)
context = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(context, self.user.id)
assert list(top_projects_context_map.keys()) == [self.project.id, project3.id]
def test_user_scoped_projects(self) -> None:
"""Test that if an org has several projects but a user is only in project teams for 2, we only show data for those 2"""
self.populate_event_data()
team2 = self.create_team(organization=self.organization)
project3 = self.create_project(name="meow", organization=self.organization, teams=[team2])
project3.first_event = self.three_days_ago
# make the event count higher than self.project and self.project2
for _ in range(15):
self.store_event_and_outcomes(
project3.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
)
project4 = self.create_project(name="woof", organization=self.organization, teams=[team2])
project4.first_event = self.three_days_ago
for _ in range(15):
self.store_event_and_outcomes(
project4.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
)
user2 = self.create_user()
self.create_member(teams=[self.team], user=user2, organization=self.organization)
context = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(context, user2.id)
assert list(top_projects_context_map.keys()) == [self.project.id, self.project2.id]
def test_slack_notification_contents(self) -> None:
self.populate_event_data()
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
link_text = "http://testserver/organizations/baz/issues/{}/?referrer=daily_summary-slack"
assert fallback_text == f"Daily Summary for Your {self.organization.slug.title()} Projects"
assert f":bell: *{fallback_text}*" in blocks[0]["text"]["text"]
assert (
"Your comprehensive overview for today - key issues, performance insights, and more."
in blocks[0]["text"]["text"]
)
assert f"*{self.project.slug}*" in blocks[2]["text"]["text"]
# check the today's event count section
assert "*Today’s Event Count*" in blocks[3]["fields"][0]["text"]
assert "higher than last 14d avg" in blocks[3]["fields"][1]["text"]
# check the new in release section
assert ":rocket:" in blocks[4]["fields"][0]["text"]
assert self.release.version in blocks[4]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[4]["fields"][1]["text"]
assert link_text.format(self.group3.id) in blocks[4]["fields"][1]["text"]
# check error issues
assert "*Today's Top 3 Error Issues" in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group1.id) in blocks[5]["fields"][0]["text"]
assert "\n`Identity not found.`" in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[5]["fields"][0]["text"]
# check performance issues
assert "*Today's Top 3 Performance Issues*" in blocks[5]["fields"][1]["text"]
assert link_text.format(self.perf_event.group.id) in blocks[5]["fields"][1]["text"]
assert "\n`db - SELECT books_author.id, b...`" in blocks[5]["fields"][1]["text"]
assert link_text.format(self.perf_event2.group.id) in blocks[5]["fields"][1]["text"]
# check escalated or regressed issues
assert "*Issues that escalated today*" in blocks[6]["fields"][0]["text"]
assert link_text.format(self.group3.id) in blocks[6]["fields"][0]["text"]
assert "*Issues that regressed today*" in blocks[6]["fields"][1]["text"]
assert link_text.format(self.group2.id) in blocks[6]["fields"][1]["text"]
# repeat above for second project
assert self.project2.slug in blocks[8]["text"]["text"]
assert "*Today’s Event Count*" in blocks[3]["fields"][0]["text"]
assert "*Today's Top 3 Error Issues" in blocks[10]["fields"][0]["text"]
assert link_text.format(self.group4.id) in blocks[10]["fields"][0]["text"]
# check footer
assert "Getting this at a funky time?" in blocks[12]["elements"][0]["text"]
assert (
"<http://testserver/settings/account/|*Account Settings*>"
in blocks[12]["elements"][0]["text"]
)
@with_feature("organizations:discover")
def test_slack_notification_contents_discover_link(self) -> None:
self.populate_event_data()
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
query_params = {
"field": ["title", "event.type", "project", "user.display", "timestamp"],
"name": "All Events",
"project": self.project.id,
"query": "event.type:error",
"sort": "-timestamp",
"statsPeriod": "24h",
"yAxis": "count()",
}
query_string = urlencode(query_params, doseq=True)
assert fallback_text == f"Daily Summary for Your {self.organization.slug.title()} Projects"
assert f":bell: *{fallback_text}*" in blocks[0]["text"]["text"]
assert (
"Your comprehensive overview for today - key issues, performance insights, and more."
in blocks[0]["text"]["text"]
)
assert f"*{self.project.slug}*" in blocks[2]["text"]["text"]
# check the today's event count section
assert "*Today’s Event Count*" in blocks[3]["fields"][0]["text"]
assert (
f"/organizations/{self.organization.slug}/discover/homepage/?{query_string}"
in blocks[3]["fields"][0]["text"]
)
assert "higher than last 14d avg" in blocks[3]["fields"][1]["text"]
def test_slack_notification_contents_newline(self) -> None:
type_string = '"""\nTraceback (most recent call last):\nFile /\'/usr/hb/meow/\''
data = {
"timestamp": self.now.isoformat(),
"fingerprint": ["group-5"],
"exception": {
"values": [
{
"type": "WorkerLostError",
"value": type_string,
}
]
},
}
self.store_event(
data=data,
project_id=self.project.id,
assert_no_errors=False,
default_event_type=EventType.DEFAULT,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"category": DataCategory.ERROR,
"timestamp": self.now,
"key_id": 1,
},
num_times=1,
)
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert '""" Traceback (most recent call las...' in blocks[4]["fields"][0]["text"]
def test_slack_notification_contents_newline_no_attachment_text(self) -> None:
data = {
"timestamp": self.now.isoformat(),
"fingerprint": ["group-5"],
"exception": {
"values": [
{
"type": "WorkerLostError",
"value": None,
}
]
},
}
self.store_event(
data=data,
project_id=self.project.id,
assert_no_errors=False,
default_event_type=EventType.DEFAULT,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"category": DataCategory.ERROR,
"timestamp": self.now,
"key_id": 1,
},
num_times=1,
)
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert "" in blocks[4]["fields"][0]["text"]
def test_slack_notification_contents_truncate_text(self) -> None:
data = {
"timestamp": self.now.isoformat(),
"fingerprint": ["group-5"],
"exception": {
"values": [
{
"type": "OperationalErrorThatIsVeryLongForSomeReasonOhMy",
"value": "QueryCanceled('canceling statement due to user request\n')",
}
]
},
}
self.store_event(
data=data,
project_id=self.project.id,
assert_no_errors=False,
default_event_type=EventType.DEFAULT,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"category": DataCategory.ERROR,
"timestamp": self.now,
"key_id": 1,
},
num_times=1,
)
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert "OperationalErrorThatIsVeryLongForSo..." in blocks[4]["fields"][0]["text"]
assert "QueryCanceled('canceling statement ..." in blocks[4]["fields"][0]["text"]
def test_limit_to_two_projects(self) -> None:
"""Test that if we have data for more than 2 projects that we only show data for the top 2"""
self.populate_event_data()
project3 = self.create_project(
name="barf", organization=self.organization, teams=[self.team]
)
project3.first_event = self.three_days_ago
project3.save()
for _ in range(15):
self.store_event_and_outcomes(
project3.id,
self.now,
fingerprint="group-1",
category=DataCategory.ERROR,
resolve=False,
)
context = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(context, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=context.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert len(blocks) == 13
def test_no_release_data(self) -> None:
"""
Test that the notification formats as expected when we don't have release data
"""
self.populate_event_data(use_release=False)
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
assert f"*{self.project.slug}*" in blocks[2]["text"]["text"]
# check that we skip ahead to the today's event count section
# if we had release data, it would be here instead
assert "*Today’s Event Count*" in blocks[3]["fields"][0]["text"]
assert "higher than last 14d avg" in blocks[3]["fields"][1]["text"]
def test_no_performance_issues(self) -> None:
"""
Test that the notification formats as expected when we don't have performance issues
"""
self.populate_event_data(performance_issues=False)
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
link_text = "http://testserver/organizations/baz/issues/{}/?referrer=daily_summary-slack"
# check the today's event count section
assert "*Today’s Event Count*" in blocks[3]["fields"][0]["text"]
assert "higher than last 14d avg" in blocks[3]["fields"][1]["text"]
# check the new in release section
assert ":rocket:" in blocks[4]["fields"][0]["text"]
assert self.release.version in blocks[4]["fields"][0]["text"]
assert link_text.format(self.group2.id) in orjson.dumps(blocks[4]["fields"]).decode("utf-8")
assert link_text.format(self.group3.id) in orjson.dumps(blocks[4]["fields"]).decode("utf-8")
# check error issues
assert "*Today's Top 3 Error Issues" in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group1.id) in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group3.id) in blocks[5]["fields"][0]["text"]
# check escalated or regressed issues
assert "*Issues that escalated today*" in blocks[6]["fields"][0]["text"]
assert link_text.format(self.group3.id) in blocks[6]["fields"][0]["text"]
assert "*Issues that regressed today*" in blocks[6]["fields"][1]["text"]
assert link_text.format(self.group2.id) in blocks[6]["fields"][1]["text"]
# repeat above for second project, skipping where performance issue info would be
assert self.project2.slug in blocks[8]["text"]["text"]
assert "*Today’s Event Count*" in blocks[9]["fields"][0]["text"]
assert "*Today's Top 3 Error Issues" in blocks[10]["fields"][0]["text"]
assert link_text.format(self.group4.id) in blocks[10]["fields"][0]["text"]
# check footer
assert "Getting this at a funky time?" in blocks[12]["elements"][0]["text"]
def test_no_escalated_regressed_issues(self) -> None:
"""
Test that the notification formats as expected when we don't have escalated and/or regressed issues
"""
self.populate_event_data(regressed_issue=False, escalated_issue=False)
ctx = build_summary_data(
timestamp=self.now.timestamp(),
duration=ONE_DAY,
organization=self.organization,
daily=True,
)
top_projects_context_map = build_top_projects_map(ctx, self.user.id)
with self.tasks():
DailySummaryNotification(
organization=ctx.organization,
recipient=Actor.from_object(self.user),
provider=ExternalProviders.SLACK,
project_context=top_projects_context_map,
).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
link_text = "http://testserver/organizations/baz/issues/{}/?referrer=daily_summary-slack"
assert f"*{self.project.slug}*" in blocks[2]["text"]["text"]
# check the today's event count section
assert "*Today’s Event Count*" in blocks[3]["fields"][0]["text"]
assert "higher than last 14d avg" in blocks[3]["fields"][1]["text"]
# check the new in release section
assert ":rocket:" in blocks[4]["fields"][0]["text"]
assert self.release.version in blocks[4]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[4]["fields"][0]["text"]
assert link_text.format(self.group3.id) in blocks[4]["fields"][1]["text"]
# check error issues
assert "*Today's Top 3 Error Issues" in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group1.id) in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[5]["fields"][0]["text"]
assert link_text.format(self.group2.id) in blocks[5]["fields"][0]["text"]
# check performance issues - skipped past escalated or regressed issues
assert "*Today's Top 3 Performance Issues*" in blocks[5]["fields"][1]["text"]
assert link_text.format(self.perf_event.group.id) in blocks[5]["fields"][1]["text"]
assert link_text.format(self.perf_event2.group.id) in blocks[5]["fields"][1]["text"]
# repeat above for second project
assert self.project2.slug in blocks[7]["text"]["text"]
assert "*Today’s Event Count*" in blocks[8]["fields"][0]["text"]
assert "*Today's Top 3 Error Issues" in blocks[9]["fields"][0]["text"]
assert link_text.format(self.group4.id) in blocks[9]["fields"][0]["text"]
# check footer
assert "Getting this at a funky time?" in blocks[11]["elements"][0]["text"]
|
DailySummaryTest
|
python
|
ray-project__ray
|
python/ray/autoscaler/node_launch_exception.py
|
{
"start": 103,
"end": 1238
}
|
class ____(Exception):
"""A structured exception that can be thrown by a node provider during a
`create_node` call to pass additional information for observability.
"""
def __init__(
self,
category: str,
description: str,
src_exc_info: Optional[Tuple[Any, Any, Any]], # The
):
"""Args:
category: A short (<20 chars) label for the error.
description: A longer, human readable description of the error.
src_exc_info: The source exception info if applicable. This is a
tuple of (type, exception, traceback) as returned by
sys.exc_info()
"""
super().__init__(f"Node Launch Exception ({category}): {description}")
self.category = category
self.description = description
self.src_exc_info = src_exc_info
def __reduce__(self):
# NOTE: Since tracebacks can't be pickled, we'll drop the optional
# traceback if we have to serialize this object.
return (
self.__class__,
(self.category, self.description, None),
)
|
NodeLaunchException
|
python
|
tox-dev__tox
|
src/tox/config/loader/ini/__init__.py
|
{
"start": 773,
"end": 4119
}
|
class ____(StrConvert, Loader[str]):
"""Load configuration from an ini section (ini file is a string to string dictionary)."""
def __init__(
self,
section: Section,
parser: ConfigParser,
overrides: list[Override],
core_section: Section,
section_key: str | None = None,
) -> None:
self._section_proxy: SectionProxy = parser[section_key or section.key]
self._parser = parser
self.core_section = core_section
super().__init__(section, overrides)
def load_raw(self, key: str, conf: Config | None, env_name: str | None) -> str:
return self.process_raw(conf, env_name, self._section_proxy[key])
@staticmethod
def process_raw(conf: Config | None, env_name: str | None, value: str) -> str:
# strip comments
elements: list[str] = []
for line in value.split("\n"):
if not line.startswith("#"):
part = _COMMENTS.sub("", line)
elements.append(part.replace("\\#", "#"))
strip_comments = "\n".join(elements)
if conf is None: # noqa: SIM108 # conf is None when we're loading the global tox configuration file for the CLI
factor_filtered = strip_comments # we don't support factor and replace functionality there
else:
factor_filtered = filter_for_env(strip_comments, env_name) # select matching factors
return factor_filtered.replace("\r", "").replace("\\\n", "") # collapse explicit new-line escape
def build( # noqa: PLR0913
self,
key: str,
of_type: type[V] | UnionType,
factory: Factory[V],
conf: Config | None,
raw: str,
args: ConfigLoadArgs,
) -> V:
delay_replace = inspect.isclass(of_type) and issubclass(of_type, SetEnv)
def replacer(raw_: str, args_: ConfigLoadArgs) -> str:
if conf is None:
replaced = raw_ # no replacement supported in the core section
else:
reference_replacer = ReplaceReferenceIni(conf, self)
try:
replaced = replace(conf, reference_replacer, raw_, args_) # do replacements
except Exception as exception:
if isinstance(exception, HandledError):
raise
name = self.core_section.key if args_.env_name is None else args_.env_name
msg = f"replace failed in {name}.{key} with {exception!r}"
raise HandledError(msg) from exception
return replaced
prepared = replacer(raw, args) if not delay_replace else raw
converted = self.to(prepared, of_type, factory)
if delay_replace:
converted.use_replacer(replacer, args) # type: ignore[attr-defined] # this can be only set_env that has it
return converted
def found_keys(self) -> set[str]:
return set(self._section_proxy.keys())
def get_section(self, name: str) -> SectionProxy | None:
# needed for non tox environment replacements
if self._parser.has_section(name):
return self._parser[name]
return None
def __repr__(self) -> str:
return f"{self.__class__.__name__}(section={self._section.key}, overrides={self.overrides!r})"
|
IniLoader
|
python
|
apache__airflow
|
providers/yandex/src/airflow/providers/yandex/utils/credentials.py
|
{
"start": 914,
"end": 3487
}
|
class ____(TypedDict, total=False):
"""Credentials dict description."""
token: str
service_account_key: dict[str, str]
def get_credentials(
oauth_token: str | None = None,
service_account_json: dict | str | None = None,
service_account_json_path: str | None = None,
) -> CredentialsType:
"""
Return credentials JSON for Yandex Cloud SDK based on credentials.
Credentials will be used with this priority:
* OAuth Token
* Service Account JSON file
* Service Account JSON
* Metadata Service
:param oauth_token: OAuth Token
:param service_account_json: Service Account JSON key or dict
:param service_account_json_path: Service Account JSON key file path
:return: Credentials JSON
"""
if oauth_token:
return {"token": oauth_token}
service_account_key = get_service_account_key(
service_account_json=service_account_json,
service_account_json_path=service_account_json_path,
)
if service_account_key:
return {"service_account_key": service_account_key}
log.info("using metadata service as credentials")
return {}
def get_service_account_key(
service_account_json: dict | str | None = None,
service_account_json_path: str | None = None,
) -> dict[str, str] | None:
"""
Return Yandex Cloud Service Account key loaded from JSON string or file.
:param service_account_json: Service Account JSON key or dict
:param service_account_json_path: Service Account JSON key file path
:return: Yandex Cloud Service Account key
"""
if service_account_json_path:
with open(service_account_json_path) as infile:
service_account_json = infile.read()
if isinstance(service_account_json, dict):
return service_account_json
if service_account_json:
return json.loads(service_account_json)
return None
def get_service_account_id(
service_account_json: dict | str | None = None,
service_account_json_path: str | None = None,
) -> str | None:
"""
Return Yandex Cloud Service Account ID loaded from JSON string or file.
:param service_account_json: Service Account JSON key or dict
:param service_account_json_path: Service Account JSON key file path
:return: Yandex Cloud Service Account ID
"""
sa_key = get_service_account_key(
service_account_json=service_account_json,
service_account_json_path=service_account_json_path,
)
if sa_key:
return sa_key.get("service_account_id")
return None
|
CredentialsType
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1179006,
"end": 1179567
}
|
class ____(Generator):
"""
SequenceGenerator schema wrapper.
Parameters
----------
sequence : dict, :class:`SequenceParams`
Generate a sequence of numbers.
name : str
Provide a placeholder name and bind data at runtime.
"""
_schema = {"$ref": "#/definitions/SequenceGenerator"}
def __init__(
self,
sequence: Optional[SchemaBase | Map] = Undefined,
name: Optional[str] = Undefined,
**kwds,
):
super().__init__(sequence=sequence, name=name, **kwds)
|
SequenceGenerator
|
python
|
astropy__astropy
|
astropy/units/core.py
|
{
"start": 74351,
"end": 78288
}
|
class ____(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
There are a number of different ways to construct a Unit, but
always returns a `UnitBase` instance. If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
Unit(s, format=None, parse_strict='silent')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword argument controls what happens
when the string does not comply with the specified format. It may be
one of the following:
- ``'raise'``: (default) raise a `ValueError` exception.
- ``'warn'``: emit a `UnitParserWarning`, and return a unit.
- ``'silent'``: return a unit silently.
With ``'warn'`` or ``'silent'`` the parser might be able to parse the
string and return a normal unit, but if it fails then an
`UnrecognizedUnit` instance is returned.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
- The last form, which creates a new `Unit` is described in detail
below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : unit-like, optional
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
ValueError
If ``represents`` cannot be parsed as a unit, e.g., because it is
a malformed string or a |Quantity| that is not a scalar.
"""
def __init__(self, st, represents=None, doc=None, format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc, format=format)
@property
def represents(self) -> UnitBase:
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases: Collection[UnitBase] = ()) -> UnitBase:
return self._represents.decompose(bases=bases)
def is_unity(self) -> bool:
return self._represents.is_unity()
@cached_property
def _hash(self) -> int:
return hash((self.name, self._represents))
@classmethod
def _from_physical_type_id(cls, physical_type_id: PhysicalTypeID) -> UnitBase:
if len(physical_type_id) == 1 and physical_type_id[0][1] == 1:
return cls(physical_type_id[0][0])
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
return CompositeUnit(1, bases, powers, _error_check=False)
|
Unit
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/concepts/io_management/input_managers.py
|
{
"start": 4573,
"end": 5470
}
|
class ____(dg.ConfigurableIOManager):
def handle_output(self, context: dg.OutputContext, obj):
table_name = context.name
write_dataframe_to_table(name=table_name, dataframe=obj)
def load_input(self, context: dg.InputContext):
if context.upstream_output:
return read_dataframe_from_table(name=context.upstream_output.name)
@dg.input_manager
def my_subselection_input_manager():
return read_dataframe_from_table(name="table_1")
@dg.op
def op1():
"""Do stuff."""
@dg.op(ins={"dataframe": dg.In(input_manager_key="my_input_manager")})
def op2(dataframe):
"""Do stuff."""
dataframe.head()
@dg.job(
resource_defs={
"io_manager": MyIOManager(),
"my_input_manager": my_subselection_input_manager,
}
)
def my_subselection_job():
op2(op1())
# end_load_input_subset
# start_better_load_input_subset
|
MyIOManager
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/widgets/ComboBox.py
|
{
"start": 108,
"end": 7773
}
|
class ____(QtWidgets.QComboBox):
"""Extends QComboBox to add extra functionality.
* Handles dict mappings -- user selects a text key, and the ComboBox indicates
the selected value.
* Requires item strings to be unique
* Remembers selected value if list is cleared and subsequently repopulated
* setItems() replaces the items in the ComboBox and blocks signals if the
value ultimately does not change.
"""
def __init__(self, parent=None, items=None, default=None):
QtWidgets.QComboBox.__init__(self, parent)
self.currentIndexChanged.connect(self.indexChanged)
self._ignoreIndexChange = False
#self.value = default
if 'darwin' in sys.platform: ## because MacOSX can show names that are wider than the comboBox
self.setSizeAdjustPolicy(QtWidgets.QComboBox.SizeAdjustPolicy.AdjustToContents)
#self.setMinimumContentsLength(10)
self._chosenText = None
self._items = OrderedDict()
if items is not None:
self.setItems(items)
if default is not None:
self.setValue(default)
def setValue(self, value):
"""Set the selected item to the first one having the given value."""
text = None
for k,v in self._items.items():
if v == value:
text = k
break
if text is None:
raise ValueError(value)
self.setText(text)
def setText(self, text):
"""Set the selected item to the first one having the given text."""
ind = self.findText(text)
if ind == -1:
raise ValueError(text)
#self.value = value
self.setCurrentIndex(ind)
def value(self):
"""
If items were given as a list of strings, then return the currently
selected text. If items were given as a dict, then return the value
corresponding to the currently selected key. If the combo list is empty,
return None.
"""
if self.count() == 0:
return None
text = self.currentText()
return self._items[text]
def ignoreIndexChange(func):
# Decorator that prevents updates to self._chosenText
def fn(self, *args, **kwds):
prev = self._ignoreIndexChange
self._ignoreIndexChange = True
try:
ret = func(self, *args, **kwds)
finally:
self._ignoreIndexChange = prev
return ret
return fn
def blockIfUnchanged(func):
# decorator that blocks signal emission during complex operations
# and emits currentIndexChanged only if the value has actually
# changed at the end.
def fn(self, *args, **kwds):
prevVal = self.value()
blocked = self.signalsBlocked()
self.blockSignals(True)
try:
ret = func(self, *args, **kwds)
finally:
self.blockSignals(blocked)
# only emit if the value has changed
if self.value() != prevVal:
self.currentIndexChanged.emit(self.currentIndex())
return ret
return fn
@ignoreIndexChange
@blockIfUnchanged
def setItems(self, items):
"""
*items* may be a list, a tuple, or a dict.
If a dict is given, then the keys are used to populate the combo box
and the values will be used for both value() and setValue().
"""
self.clear()
self.addItems(items)
def items(self):
return self._items.copy()
def updateList(self, items):
# for backward compatibility
return self.setItems(items)
@QtCore.Slot(int)
def indexChanged(self, index):
# current index has changed; need to remember new 'chosen text'
if self._ignoreIndexChange:
return
self._chosenText = self.currentText()
def setCurrentIndex(self, index):
QtWidgets.QComboBox.setCurrentIndex(self, index)
def itemsChanged(self):
# try to set the value to the last one selected, if it is available.
if self._chosenText is not None:
try:
self.setText(self._chosenText)
except ValueError:
pass
@ignoreIndexChange
def insertItem(self, *args):
raise NotImplementedError()
#QtWidgets.QComboBox.insertItem(self, *args)
#self.itemsChanged()
@ignoreIndexChange
def insertItems(self, *args):
raise NotImplementedError()
#QtWidgets.QComboBox.insertItems(self, *args)
#self.itemsChanged()
@ignoreIndexChange
def addItem(self, *args, **kwds):
# Need to handle two different function signatures for QComboBox.addItem
try:
if isinstance(args[0], str):
text = args[0]
if len(args) == 2:
value = args[1]
else:
value = kwds.get('value', text)
else:
text = args[1]
if len(args) == 3:
value = args[2]
else:
value = kwds.get('value', text)
except IndexError:
raise TypeError("First or second argument of addItem must be a string.")
if text in self._items:
raise Exception('ComboBox already has item named "%s".' % text)
self._items[text] = value
QtWidgets.QComboBox.addItem(self, *args)
self.itemsChanged()
def setItemValue(self, name, value):
if name not in self._items:
self.addItem(name, value)
else:
self._items[name] = value
@ignoreIndexChange
@blockIfUnchanged
def addItems(self, items):
if isinstance(items, list) or isinstance(items, tuple):
texts = items
items = dict([(x, x) for x in items])
elif isinstance(items, dict):
texts = list(items.keys())
else:
raise TypeError("items argument must be list or dict or tuple (got %s)." % type(items))
for t in texts:
if t in self._items:
raise Exception('ComboBox already has item named "%s".' % t)
for k,v in items.items():
self._items[k] = v
QtWidgets.QComboBox.addItems(self, list(texts))
self.itemsChanged()
@ignoreIndexChange
def clear(self):
self._items = OrderedDict()
QtWidgets.QComboBox.clear(self)
self.itemsChanged()
def saveState(self):
ind = self.currentIndex()
data = self.itemData(ind)
#if not data.isValid():
if data is not None:
try:
if not data.isValid():
data = None
else:
data = data.toInt()[0]
except AttributeError:
pass
if data is None:
return self.itemText(ind)
else:
return data
def restoreState(self, v):
if type(v) is int:
ind = self.findData(v)
if ind > -1:
self.setCurrentIndex(ind)
return
self.setCurrentIndex(self.findText(str(v)))
def widgetGroupInterface(self):
return (self.currentIndexChanged, self.saveState, self.restoreState)
|
ComboBox
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/indexing.py
|
{
"start": 36727,
"end": 39958
}
|
class ____(Indexer):
dim_indexers: list[SliceDimIndexer]
shape: tuple[int, ...]
drop_axes: tuple[int, ...]
def __init__(
self, selection: BasicSelection, shape: tuple[int, ...], chunk_grid: ChunkGrid
) -> None:
chunk_shape = get_chunk_shape(chunk_grid)
# handle ellipsis
selection_normalized = replace_ellipsis(selection, shape)
# normalize list to array
selection_normalized = replace_lists(selection_normalized)
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_size in zip(
selection_normalized, shape, chunk_shape, strict=True
):
dim_numchunks = int(np.ceil(dim_len / dim_chunk_size))
if is_integer(dim_sel):
if dim_sel < 0:
dim_sel = dim_numchunks + dim_sel
start = dim_sel * dim_chunk_size
stop = start + dim_chunk_size
slice_ = slice(start, stop)
elif is_slice(dim_sel):
start = dim_sel.start if dim_sel.start is not None else 0
stop = dim_sel.stop if dim_sel.stop is not None else dim_numchunks
if dim_sel.step not in {1, None}:
raise IndexError(
"unsupported selection item for block indexing; "
f"expected integer or slice with step=1, got {type(dim_sel)!r}"
)
# Can't reuse wraparound_indices because it expects a numpy array
# We have integers here.
if start < 0:
start = dim_numchunks + start
if stop < 0:
stop = dim_numchunks + stop
start *= dim_chunk_size
stop *= dim_chunk_size
slice_ = slice(start, stop)
else:
raise IndexError(
"unsupported selection item for block indexing; "
f"expected integer or slice, got {type(dim_sel)!r}"
)
dim_indexer = SliceDimIndexer(slice_, dim_len, dim_chunk_size)
dim_indexers.append(dim_indexer)
if start >= dim_len or start < 0:
msg = f"index out of bounds for dimension with length {dim_len}"
raise BoundsCheckError(msg)
shape = tuple(s.nitems for s in dim_indexers)
object.__setattr__(self, "dim_indexers", dim_indexers)
object.__setattr__(self, "shape", shape)
object.__setattr__(self, "drop_axes", ())
def __iter__(self) -> Iterator[ChunkProjection]:
for dim_projections in itertools.product(*self.dim_indexers):
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(
p.dim_out_sel for p in dim_projections if p.dim_out_sel is not None
)
is_complete_chunk = all(p.is_complete_chunk for p in dim_projections)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk)
@dataclass(frozen=True)
|
BlockIndexer
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/ext/asyncio/session.py
|
{
"start": 59738,
"end": 60464
}
|
class ____(Generic[_AS]):
__slots__ = ("async_session", "trans")
async_session: _AS
trans: AsyncSessionTransaction
def __init__(self, async_session: _AS):
self.async_session = async_session
async def __aenter__(self) -> _AS:
self.trans = self.async_session.begin()
await self.trans.__aenter__()
return self.async_session
async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None:
async def go() -> None:
await self.trans.__aexit__(type_, value, traceback)
await self.async_session.__aexit__(type_, value, traceback)
task = asyncio.create_task(go())
await asyncio.shield(task)
|
_AsyncSessionContextManager
|
python
|
ray-project__ray
|
rllib/core/learner/tests/test_learner_group.py
|
{
"start": 9375,
"end": 14661
}
|
class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_restore_from_path_multi_rl_module_and_individual_modules(self):
"""Tests whether MultiRLModule- and single RLModule states can be restored."""
# this is expanded to more scaling modes on the release ci.
scaling_modes = ["local-cpu", "multi-gpu-ddp"]
for scaling_mode in scaling_modes:
print(f"Testing scaling mode: {scaling_mode}.")
# env will have agent ids 0 and 1
env = MultiAgentCartPole({"num_agents": 2})
config_overrides = REMOTE_CONFIGS.get(scaling_mode) or LOCAL_CONFIGS.get(
scaling_mode
)
config = BaseTestingAlgorithmConfig().update_from_dict(config_overrides)
learner_group = config.build_learner_group(env=env)
spec = config.get_multi_rl_module_spec(env=env).module_specs[
DEFAULT_MODULE_ID
]
learner_group.add_module(module_id="0", module_spec=spec)
learner_group.add_module(module_id="1", module_spec=spec)
learner_group.remove_module(DEFAULT_MODULE_ID)
module_0 = spec.build()
module_1 = spec.build()
multi_rl_module = MultiRLModule()
multi_rl_module.add_module(module_id="0", module=module_0)
multi_rl_module.add_module(module_id="1", module=module_1)
# Check if we can load just the MultiRLModule.
with tempfile.TemporaryDirectory() as tmpdir:
multi_rl_module.save_to_path(tmpdir)
old_learner_weights = learner_group.get_weights()
learner_group.restore_from_path(
tmpdir,
component=COMPONENT_LEARNER + "/" + COMPONENT_RL_MODULE,
)
# Check the weights of the module in the learner group are the
# same as the weights of the newly created MultiRLModule
check(learner_group.get_weights(), multi_rl_module.get_state())
learner_group.set_state(
{
COMPONENT_LEARNER: {COMPONENT_RL_MODULE: old_learner_weights},
}
)
check(learner_group.get_weights(), old_learner_weights)
# Check if we can load just single agent RL Modules.
with tempfile.TemporaryDirectory() as tmpdir:
module_0.save_to_path(tmpdir)
with tempfile.TemporaryDirectory() as tmpdir2:
temp_module = spec.build()
temp_module.save_to_path(tmpdir2)
old_learner_weights = learner_group.get_weights()
learner_group.restore_from_path(
tmpdir,
component=COMPONENT_LEARNER + "/" + COMPONENT_RL_MODULE + "/0",
)
learner_group.restore_from_path(
tmpdir2,
component=COMPONENT_LEARNER + "/" + COMPONENT_RL_MODULE + "/1",
)
# check the weights of the module in the learner group are the
# same as the weights of the newly created MultiRLModule
new_multi_rl_module = MultiRLModule()
new_multi_rl_module.add_module(module_id="0", module=module_0)
new_multi_rl_module.add_module(module_id="1", module=temp_module)
check(learner_group.get_weights(), new_multi_rl_module.get_state())
learner_group.set_weights(old_learner_weights)
# Check if we can first load a MultiRLModule, then a single agent RLModule
# (within that MultiRLModule). Check that the single agent RL Module is
# loaded over the matching submodule in the MultiRLModule.
with tempfile.TemporaryDirectory() as tmpdir:
module_0 = spec.build()
multi_rl_module = MultiRLModule()
multi_rl_module.add_module(module_id="0", module=module_0)
multi_rl_module.add_module(module_id="1", module=spec.build())
multi_rl_module.save_to_path(tmpdir)
with tempfile.TemporaryDirectory() as tmpdir2:
module_1 = spec.build()
module_1.save_to_path(tmpdir2)
learner_group.restore_from_path(
tmpdir,
component=COMPONENT_LEARNER + "/" + COMPONENT_RL_MODULE,
)
learner_group.restore_from_path(
tmpdir2,
component=COMPONENT_LEARNER + "/" + COMPONENT_RL_MODULE + "/1",
)
new_multi_rl_module = MultiRLModule()
new_multi_rl_module.add_module(module_id="0", module=module_0)
new_multi_rl_module.add_module(module_id="1", module=module_1)
check(learner_group.get_weights(), new_multi_rl_module.get_state())
del learner_group
|
TestLearnerGroupCheckpointRestore
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 1010084,
"end": 1010410
}
|
class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData, TeamAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("is_ldap_mapped",)
is_ldap_mapped = sgqlc.types.Field(Boolean, graphql_name="isLdapMapped")
|
TeamAddMemberAuditEntry
|
python
|
psf__black
|
tests/data/cases/type_aliases.py
|
{
"start": 393,
"end": 458
}
|
class ____:
type InClass = int
type = aliased
print(type(42))
|
X
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/events.py
|
{
"start": 76092,
"end": 79618
}
|
class ____(Request):
"""
Get 'plot' events for the given tasks
:param tasks: List of task IDs
:type tasks: Sequence[str]
:param iters: Max number of latest iterations for which to return debug images
:type iters: int
:param scroll_id: Scroll ID of previous call (used for getting more results)
:type scroll_id: str
:param no_scroll: If Truethen no scroll is created. Suitable for one time calls
:type no_scroll: bool
"""
_service = "events"
_action = "get_multi_task_plots"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"iters": {
"description": "Max number of latest iterations for which to return debug images",
"type": "integer",
},
"no_scroll": {
"default": False,
"description": "If Truethen no scroll is created. Suitable for one time calls",
"type": "boolean",
},
"scroll_id": {
"description": "Scroll ID of previous call (used for getting more results)",
"type": "string",
},
"tasks": {
"description": "List of task IDs",
"items": {"description": "Task ID", "type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(
self,
tasks: List[str],
iters: Optional[int] = None,
scroll_id: Optional[str] = None,
no_scroll: Optional[bool] = False,
**kwargs: Any
) -> None:
super(GetMultiTaskPlotsRequest, self).__init__(**kwargs)
self.tasks = tasks
self.iters = iters
self.scroll_id = scroll_id
self.no_scroll = no_scroll
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("iters")
def iters(self) -> Optional[int]:
return self._property_iters
@iters.setter
def iters(self, value: Optional[int]) -> None:
if value is None:
self._property_iters = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iters", six.integer_types)
self._property_iters = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("no_scroll")
def no_scroll(self) -> Optional[bool]:
return self._property_no_scroll
@no_scroll.setter
def no_scroll(self, value: Optional[bool]) -> None:
if value is None:
self._property_no_scroll = None
return
self.assert_isinstance(value, "no_scroll", (bool,))
self._property_no_scroll = value
|
GetMultiTaskPlotsRequest
|
python
|
plotly__plotly.py
|
plotly/graph_objs/box/marker/_line.py
|
{
"start": 233,
"end": 5612
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "box.marker"
_path_str = "box.marker.line"
_valid_props = {"color", "outliercolor", "outlierwidth", "width"}
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color
or an array of numbers that are mapped to the colorscale
relative to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def outliercolor(self):
"""
Sets the border line color of the outlier sample points.
Defaults to marker.color
The 'outliercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outliercolor"]
@outliercolor.setter
def outliercolor(self, val):
self["outliercolor"] = val
@property
def outlierwidth(self):
"""
Sets the border line width (in px) of the outlier sample
points.
The 'outlierwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlierwidth"]
@outlierwidth.setter
def outlierwidth(self, val):
self["outlierwidth"] = val
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
outliercolor
Sets the border line color of the outlier sample
points. Defaults to marker.color
outlierwidth
Sets the border line width (in px) of the outlier
sample points.
width
Sets the width (in px) of the lines bounding the marker
points.
"""
def __init__(
self,
arg=None,
color=None,
outliercolor=None,
outlierwidth=None,
width=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.box.marker.Line`
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
outliercolor
Sets the border line color of the outlier sample
points. Defaults to marker.color
outlierwidth
Sets the border line width (in px) of the outlier
sample points.
width
Sets the width (in px) of the lines bounding the marker
points.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.box.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("outliercolor", arg, outliercolor)
self._set_property("outlierwidth", arg, outlierwidth)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Line
|
python
|
python-poetry__poetry
|
tests/repositories/fixtures/pypi.org/generate.py
|
{
"start": 5982,
"end": 8300
}
|
class ____:
sha256: str = ""
md5: str = ""
KNOWN_DISTRIBUTION_HASHES = {{
{text},
}}
@pytest.fixture
def dist_hash_getter() -> DistributionHashGetter:
def get_hash(name: str) -> DistributionHash:
return KNOWN_DISTRIBUTION_HASHES.get(name, DistributionHash())
return get_hash
""",
encoding="utf-8",
)
def cleanup_legacy_html_hashes(metadata: ReleaseFileMetadata) -> None:
for index in FIXTURE_PATH_REPOSITORIES_LEGACY.glob("*.html"):
existing_content = index.read_text(encoding="utf-8")
content = re.sub(
f"{metadata.path.name}#sha256=[A-Fa-f0-9]{{64}}",
f"{metadata.path.name}#sha256={metadata.sha256}",
existing_content,
)
content = re.sub(
f'data-dist-info-metadata="sha256=[A-Fa-f0-9]{{64}}">{metadata.path.name}<',
f'data-dist-info-metadata="sha256={metadata.sha256}">{metadata.path.name}<',
content,
)
content = re.sub(
f"{metadata.path.name}#md5=[A-Fa-f0-9]{{32}}",
f"{metadata.path.name}#md5={metadata.md5}",
content,
)
content = re.sub(
f'data-dist-info-metadata="md5=[A-Fa-f0-9]{{32}}">{metadata.path.name}<',
f'data-dist-info-metadata="md5={metadata.md5}">{metadata.path.name}<',
content,
)
if existing_content != content:
logger.info("Rewriting hashes in %s", index)
index.write_text(content, encoding="utf-8")
def cleanup_installation_fixtures(metadata: ReleaseFileMetadata) -> None:
for file in FIXTURE_PATH_INSTALLATION.glob("*.test"):
original_content = file.read_text(encoding="utf-8")
content = re.sub(
f'file = "{metadata.path.name}", hash = "sha256:[A-Fa-f0-9]{{64}}"',
f'file = "{metadata.path.name}", hash = "sha256:{metadata.sha256}"',
original_content,
)
content = re.sub(
f'file = "{metadata.path.name}", hash = "md5:[A-Fa-f0-9]{{32}}"',
f'file = "{metadata.path.name}", hash = "md5:{metadata.md5}"',
content,
)
if content != original_content:
logger.info("Rewriting hashes in %s", file)
file.write_text(content, encoding="utf-8")
|
DistributionHash
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/index/sources.py
|
{
"start": 1409,
"end": 2974
}
|
class ____:
"""Scans directory and caches results"""
def __init__(self, path: str) -> None:
self._path = path
self._page_candidates: List[str] = []
self._project_name_to_urls: Dict[str, List[str]] = defaultdict(list)
self._scanned_directory = False
def _scan_directory(self) -> None:
"""Scans directory once and populates both page_candidates
and project_name_to_urls at the same time
"""
for entry in os.scandir(self._path):
url = path_to_url(entry.path)
if _is_html_file(url):
self._page_candidates.append(url)
continue
# File must have a valid wheel or sdist name,
# otherwise not worth considering as a package
try:
project_filename = parse_wheel_filename(entry.name)[0]
except InvalidWheelFilename:
try:
project_filename = parse_sdist_filename(entry.name)[0]
except InvalidSdistFilename:
continue
self._project_name_to_urls[project_filename].append(url)
self._scanned_directory = True
@property
def page_candidates(self) -> List[str]:
if not self._scanned_directory:
self._scan_directory()
return self._page_candidates
@property
def project_name_to_urls(self) -> Dict[str, List[str]]:
if not self._scanned_directory:
self._scan_directory()
return self._project_name_to_urls
|
_FlatDirectoryToUrls
|
python
|
google__pytype
|
pytype/pyc/opcodes.py
|
{
"start": 11259,
"end": 11364
}
|
class ____(OpcodeWithArg): # Arg: Number of list items
_FLAGS = HAS_ARGUMENT
__slots__ = ()
|
BUILD_LIST
|
python
|
realpython__materials
|
python-maze-solver/source_code_final/src/maze_solver/view/primitives.py
|
{
"start": 1300,
"end": 1599
}
|
class ____:
top_left: Point | None = None
def draw(self, **attributes) -> str:
if self.top_left:
attrs = attributes | {"x": self.top_left.x, "y": self.top_left.y}
else:
attrs = attributes
return tag("rect", **attrs)
@dataclass(frozen=True)
|
Rect
|
python
|
python__mypy
|
mypyc/test/test_run.py
|
{
"start": 15265,
"end": 15619
}
|
class ____(TestRun):
"""Run the main multi-module tests in multi-file compilation mode.
In multi-file mode each module gets compiled into a separate C file,
but all modules (C files) are compiled together.
"""
multi_file = True
test_name_suffix = "_multi"
files = ["run-multimodule.test", "run-mypy-sim.test"]
|
TestRunMultiFile
|
python
|
simplejson__simplejson
|
simplejson/tests/test_subclass.py
|
{
"start": 86,
"end": 190
}
|
class ____(int):
def __repr__(self):
return 'invalid json'
__str__ = __repr__
|
AlternateInt
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/overloadOverlap1.py
|
{
"start": 5394,
"end": 5471
}
|
class ____(Protocol):
def __radd__(self, other: Any, /) -> Any: ...
|
DProto1
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/forms.py
|
{
"start": 15639,
"end": 16471
}
|
class ____(ProjectForm):
"""Form used when importing a project."""
class Meta:
model = Project
fields = ("name", "repo", "default_branch", "language", "remote_repository")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["repo"].widget.attrs["placeholder"] = self.placehold_repo()
self.fields["repo"].widget.attrs["required"] = True
# Make the repo field readonly if a remote repository is given,
# since it will be derived from the remote repository.
# In the form we already populate this field with the remote repository's clone URL.
if self.initial.get("remote_repository"):
self.fields["repo"].disabled = True
self.fields["remote_repository"].widget = forms.HiddenInput()
|
ProjectBasicsForm
|
python
|
huggingface__transformers
|
src/transformers/models/cohere/modular_cohere.py
|
{
"start": 11669,
"end": 12026
}
|
class ____(LlamaModel):
def __init__(self, config: CohereConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
|
CohereModel
|
python
|
pytorch__pytorch
|
test/onnx/exporter/test_capture_strategies.py
|
{
"start": 290,
"end": 2202
}
|
class ____(common_utils.TestCase):
@common_utils.parametrize(
"strategy_cls",
[
_capture_strategies.TorchExportStrictStrategy,
_capture_strategies.TorchExportNonStrictStrategy,
_capture_strategies.TorchExportDraftExportStrategy,
],
name_fn=lambda strategy_cls: strategy_cls.__name__,
)
def test_jit_isinstance(self, strategy_cls):
class Model(torch.nn.Module):
def forward(self, a, b):
if torch.jit.isinstance(a, torch.Tensor):
return a.cos()
return b.sin()
model = Model()
a = torch.tensor(0.0)
b = torch.tensor(1.0)
result = strategy_cls()(model, (a, b), kwargs=None, dynamic_shapes=None)
ep = result.exported_program
assert ep is not None
torch.testing.assert_close(ep.module()(a, b), model(a, b))
def test_draft_export_on_data_dependent_model(self):
class Model(torch.nn.Module):
def forward(self, a, b):
if a.sum() > 0:
return a.cos()
# The branch is expected to be specialized and a warning is logged
return b.sin()
model = Model()
a = torch.tensor(0.0)
b = torch.tensor(1.0)
strategy = _capture_strategies.TorchExportDraftExportStrategy()
with self.assertLogs("torch.export", level="WARNING") as cm:
result = strategy(model, (a, b), kwargs=None, dynamic_shapes=None)
expected_warning = "1 issue(s) found during export, and it was not able to soundly produce a graph."
self.assertIn(expected_warning, str(cm.output))
ep = result.exported_program
assert ep is not None
torch.testing.assert_close(ep.module()(a, b), model(a, b))
if __name__ == "__main__":
common_utils.run_tests()
|
ExportStrategiesTest
|
python
|
getsentry__sentry-python
|
sentry_sdk/profiler/continuous_profiler.py
|
{
"start": 18276,
"end": 20175
}
|
class ____:
def __init__(self, options, sdk_info, buffer_size, capture_func):
# type: (Dict[str, Any], SDKInfo, int, Callable[[Envelope], None]) -> None
self.options = options
self.sdk_info = sdk_info
self.buffer_size = buffer_size
self.capture_func = capture_func
self.profiler_id = uuid.uuid4().hex
self.chunk = ProfileChunk()
# Make sure to use the same clock to compute a sample's monotonic timestamp
# to ensure the timestamps are correctly aligned.
self.start_monotonic_time = now()
# Make sure the start timestamp is defined only once per profiler id.
# This prevents issues with clock drift within a single profiler session.
#
# Subtracting the start_monotonic_time here to find a fixed starting position
# for relative monotonic timestamps for each sample.
self.start_timestamp = (
datetime.now(timezone.utc).timestamp() - self.start_monotonic_time
)
def write(self, monotonic_time, sample):
# type: (float, ExtractedSample) -> None
if self.should_flush(monotonic_time):
self.flush()
self.chunk = ProfileChunk()
self.start_monotonic_time = now()
self.chunk.write(self.start_timestamp + monotonic_time, sample)
def should_flush(self, monotonic_time):
# type: (float) -> bool
# If the delta between the new monotonic time and the start monotonic time
# exceeds the buffer size, it means we should flush the chunk
return monotonic_time - self.start_monotonic_time >= self.buffer_size
def flush(self):
# type: () -> None
chunk = self.chunk.to_json(self.profiler_id, self.options, self.sdk_info)
envelope = Envelope()
envelope.add_profile_chunk(chunk)
self.capture_func(envelope)
|
ProfileBuffer
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/test_responses.py
|
{
"start": 491,
"end": 601
}
|
class ____(BaseModel):
# No custom docstring, should have no description in tool
data: str
|
EmptyDocModel
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/tasks/mixins.py
|
{
"start": 386,
"end": 3423
}
|
class ____:
"""Mixin that handles the VCS sync/update."""
def get_version(self, version_pk):
"""
Retrieve version data from the API.
:param version_pk: version pk to sync
:type version_pk: int
:returns: a data-complete version object
:rtype: builds.models.APIVersion
"""
version_data = self.data.api_client.version(version_pk).get()
return APIVersion(**version_data)
def sync_versions(self, vcs_repository):
"""
Update tags/branches via a Celery task.
.. note::
It may trigger a new build to the stable version.
"""
# NOTE: `sync_versions` should receive `tags` and `branches` already
# and just validate them trigger the task. All the other logic should
# be done by the BuildDirector or the VCS backend. We should not
# check this here and do not depend on ``vcs_repository``.
sync_tags = not self.data.project.has_feature(Feature.SKIP_SYNC_TAGS)
sync_branches = not self.data.project.has_feature(Feature.SKIP_SYNC_BRANCHES)
try:
branches, tags = vcs_repository.lsremote(
include_tags=sync_tags,
include_branches=sync_branches,
)
except RepositoryError:
log.warning(
"Error running lsremote to get versions from the repository.",
exc_info=True,
)
return
tags_data = [
{
"identifier": v.identifier,
"verbose_name": v.verbose_name,
}
for v in tags
]
branches_data = [
{
"identifier": v.identifier,
"verbose_name": v.verbose_name,
}
for v in branches
]
log.debug("Synchronizing versions.", branches=branches, tags=tags)
self.validate_duplicate_reserved_versions(
tags_data=tags_data,
branches_data=branches_data,
)
build_tasks.sync_versions_task.delay(
project_pk=self.data.project.pk,
tags_data=tags_data,
branches_data=branches_data,
)
def validate_duplicate_reserved_versions(self, tags_data, branches_data):
"""
Check if there are duplicated names of reserved versions.
The user can't have a branch and a tag with the same name of
``latest`` or ``stable``. Raise a RepositoryError exception
if there is a duplicated name.
:param data: Dict containing the versions from tags and branches
"""
version_names = [version["verbose_name"] for version in tags_data + branches_data]
counter = Counter(version_names)
for reserved_name in [STABLE_VERBOSE_NAME, LATEST_VERBOSE_NAME]:
if counter[reserved_name] > 1:
raise RepositoryError(
RepositoryError.DUPLICATED_RESERVED_VERSIONS,
)
|
SyncRepositoryMixin
|
python
|
jazzband__django-polymorphic
|
example/pexp/models.py
|
{
"start": 440,
"end": 645
}
|
class ____(ShowFieldTypeAndContent, PolymorphicModel):
"""UUID as primary key example"""
uuid_primary_key = models.UUIDField(primary_key=True)
field1 = models.CharField(max_length=10)
|
UUIDModelA
|
python
|
getsentry__sentry
|
src/sentry/identity/vsts/provider.py
|
{
"start": 8728,
"end": 9197
}
|
class ____(OAuth2LoginView):
def get_authorize_params(self, state, redirect_uri):
params = {
"client_id": self.client_id,
"response_type": "code",
"redirect_uri": redirect_uri,
"response_mode": "query",
"scope": self.get_scope(),
"state": state,
}
if options.get("vsts.consent-prompt"):
params["prompt"] = "consent"
return params
|
VSTSOAuth2LoginView
|
python
|
coleifer__peewee
|
tests/reflection.py
|
{
"start": 1835,
"end": 15509
}
|
class ____(BaseReflectionTestCase):
requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category,
Nugget]
def test_generate_models(self):
models = self.introspector.generate_models()
self.assertTrue(set((
'category',
'col_types',
'fkpk',
'nugget',
'nullable',
'rel_model',
'underscores')).issubset(set(models)))
def assertIsInstance(obj, klass):
self.assertTrue(isinstance(obj, klass))
category = models['category']
self.assertEqual(
sorted(category._meta.fields),
['id', 'name', 'parent'])
assertIsInstance(category.id, AutoField)
assertIsInstance(category.name, CharField)
assertIsInstance(category.parent, ForeignKeyField)
self.assertEqual(category.parent.rel_model, category)
fkpk = models['fkpk']
self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
assertIsInstance(fkpk.col_types, ForeignKeyField)
self.assertEqual(fkpk.col_types.rel_model, models['col_types'])
self.assertTrue(fkpk.col_types.primary_key)
relmodel = models['rel_model']
self.assertEqual(
sorted(relmodel._meta.fields),
['col_types', 'col_types_nullable', 'id'])
assertIsInstance(relmodel.col_types, ForeignKeyField)
assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
self.assertFalse(relmodel.col_types.null)
self.assertTrue(relmodel.col_types_nullable.null)
self.assertEqual(relmodel.col_types.rel_model,
models['col_types'])
self.assertEqual(relmodel.col_types_nullable.rel_model,
models['col_types'])
@requires_sqlite
def test_generate_models_indexes(self):
models = self.introspector.generate_models()
self.assertEqual(models['fkpk']._meta.indexes, [])
self.assertEqual(models['rel_model']._meta.indexes, [])
self.assertEqual(models['category']._meta.indexes, [])
col_types = models['col_types']
indexed = set(['f1'])
unique = set(['f10'])
for field in col_types._meta.sorted_fields:
self.assertEqual(field.index, field.name in indexed)
self.assertEqual(field.unique, field.name in unique)
indexes = col_types._meta.indexes
self.assertEqual(sorted(indexes), [
(['f10', 'f11'], True),
(['f11', 'f8', 'f13'], False),
])
def test_table_subset(self):
models = self.introspector.generate_models(table_names=[
'category',
'col_types',
'foobarbaz'])
self.assertEqual(sorted(models.keys()), ['category', 'col_types'])
@requires_sqlite
def test_sqlite_fk_re(self):
user_id_tests = [
'FOREIGN KEY("user_id") REFERENCES "users"("id")',
'FOREIGN KEY(user_id) REFERENCES users(id)',
'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])',
'"user_id" NOT NULL REFERENCES "users" ("id")',
'user_id not null references users (id)',
]
fk_pk_tests = [
('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
'"coltypes" ("f11")'),
'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
]
regex = SqliteMetadata.re_foreign_key
for test in user_id_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'user_id', 'users', 'id',
))
for test in fk_pk_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'col_types_id', 'coltypes', 'f11',
))
def test_make_column_name(self):
# Tests for is_foreign_key=False.
tests = (
('Column', 'column'),
('Foo_id', 'foo_id'),
('foo_id', 'foo_id'),
('foo_id_id', 'foo_id_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_id'),
('camelCase', 'camel_case'),
('ABCdefGhi', 'ab_cdef_ghi'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name), expected)
# Tests for is_foreign_key=True.
tests = (
('Foo_id', 'foo'),
('foo_id', 'foo'),
('foo_id_id', 'foo_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_'),
('camelCase', 'camel_case'),
('ABCdefGhi', 'ab_cdef_ghi'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name, True), expected)
def test_make_model_name(self):
tests = (
('Table', 'Table'),
('table', 'Table'),
('table_baz', 'TableBaz'),
('foo__bar__baz2', 'FooBarBaz2'),
('foo12_3', 'Foo123'),
)
for table_name, expected in tests:
self.assertEqual(
self.introspector.make_model_name(table_name), expected)
def test_col_types(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', (BigIntegerField, IntegerField), False),
# There do not appear to be separate constants for the blob and
# text field types in MySQL's drivers. See GH#1034.
('f2', (BlobField, TextField), False),
('f3', (BooleanField, IntegerField), False),
('f4', CharField, False),
('f5', DateField, False),
('f6', DateTimeField, False),
('f7', DecimalField, False),
('f8', (DoubleField, FloatField), False),
('f9', FloatField, False),
('f10', IntegerField, False),
('f11', AutoField, False),
('f12', TextField, False),
('f13', TimeField, False))),
('rel_model', (
('col_types_id', ForeignKeyField, False),
('col_types_nullable_id', ForeignKeyField, True))),
('nugget', (
('category_id', ForeignKeyField, False),
('category', CharField, False))),
('nullable', (
('nullable_cf', CharField, True),
('nullable_if', IntegerField, True))),
('fkpk', (
('col_types_id', ForeignKeyField, False),)),
('underscores', (
('_id', AutoField, False),
('_name', CharField, False))),
('category', (
('name', CharField, False),
('parent_id', ForeignKeyField, True))),
)
for table_name, expected_columns in expected:
introspected_columns = columns[table_name]
for field_name, field_class, is_null in expected_columns:
if not isinstance(field_class, (list, tuple)):
field_class = (field_class,)
column = introspected_columns[field_name]
self.assertTrue(column.field_class in field_class,
"%s in %s" % (column.field_class, field_class))
self.assertEqual(column.nullable, is_null)
def test_foreign_keys(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
self.assertEqual(foreign_keys['col_types'], [])
rel_model = foreign_keys['rel_model']
self.assertEqual(len(rel_model), 2)
fkpk = foreign_keys['fkpk']
self.assertEqual(len(fkpk), 1)
fkpk_fk = fkpk[0]
self.assertEqual(fkpk_fk.table, 'fkpk')
self.assertEqual(fkpk_fk.column, 'col_types_id')
self.assertEqual(fkpk_fk.dest_table, 'col_types')
self.assertEqual(fkpk_fk.dest_column, 'f11')
category = foreign_keys['category']
self.assertEqual(len(category), 1)
category_fk = category[0]
self.assertEqual(category_fk.table, 'category')
self.assertEqual(category_fk.column, 'parent_id')
self.assertEqual(category_fk.dest_table, 'category')
self.assertEqual(category_fk.dest_column, 'id')
def test_table_names(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
names = (
('col_types', 'ColTypes'),
('nullable', 'Nullable'),
('rel_model', 'RelModel'),
('fkpk', 'Fkpk'))
for k, v in names:
self.assertEqual(model_names[k], v)
def test_column_meta(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
rel_model = columns['rel_model']
col_types_id = rel_model['col_types_id']
self.assertEqual(col_types_id.get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'field': "'f11'",
})
col_types_nullable_id = rel_model['col_types_nullable_id']
self.assertEqual(col_types_nullable_id.get_field_parameters(), {
'column_name': "'col_types_nullable_id'",
'null': True,
'backref': "'col_types_col_types_nullable_set'",
'model': 'ColTypes',
'field': "'f11'",
})
fkpk = columns['fkpk']
self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'primary_key': True,
'field': "'f11'"})
category = columns['category']
parent_id = category['parent_id']
self.assertEqual(parent_id.get_field_parameters(), {
'column_name': "'parent_id'",
'null': True,
'model': "'self'",
'field': "'id'",
})
nugget = columns['nugget']
category_fk = nugget['category_id']
self.assertEqual(category_fk.name, 'category_id')
self.assertEqual(category_fk.get_field_parameters(), {
'field': "'id'",
'model': 'Category',
'column_name': "'category_id'",
})
category = nugget['category']
self.assertEqual(category.name, 'category')
def test_get_field(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', ('f1 = BigIntegerField(index=True)',
'f1 = IntegerField(index=True)')),
('f2', ('f2 = BlobField()', 'f2 = TextField()')),
('f4', 'f4 = CharField()'),
('f5', 'f5 = DateField()'),
('f6', 'f6 = DateTimeField()'),
('f7', 'f7 = DecimalField()'),
('f10', 'f10 = IntegerField(unique=True)'),
('f11', 'f11 = AutoField()'),
('f12', ('f12 = TextField()', 'f12 = BlobField()')),
('f13', 'f13 = TimeField()'),
)),
('nullable', (
('nullable_cf', 'nullable_cf = '
'CharField(null=True)'),
('nullable_if', 'nullable_if = IntegerField(null=True)'),
)),
('fkpk', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes, "
'primary_key=True)'),
)),
('nugget', (
('category_id', 'category_id = ForeignKeyField('
"column_name='category_id', field='id', model=Category)"),
('category', 'category = CharField()'),
)),
('rel_model', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes)"),
('col_types_nullable_id', 'col_types_nullable = '
"ForeignKeyField(backref='col_types_col_types_nullable_set', "
"column_name='col_types_nullable_id', field='f11', "
'model=ColTypes, null=True)'),
)),
('underscores', (
('_id', '_id = AutoField()'),
('_name', '_name = CharField()'),
)),
('category', (
('name', 'name = CharField()'),
('parent_id', 'parent = ForeignKeyField('
"column_name='parent_id', field='id', model='self', "
'null=True)'),
)),
)
for table, field_data in expected:
for field_name, fields in field_data:
if not isinstance(fields, tuple):
fields = (fields,)
actual = columns[table][field_name].get_field()
self.assertTrue(actual in fields,
'%s not in %s' % (actual, fields))
|
TestReflection
|
python
|
huggingface__transformers
|
src/transformers/models/arcee/configuration_arcee.py
|
{
"start": 1299,
"end": 8927
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ArceeModel`]. It is used to instantiate an Arcee
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the AFM-4.5B-Base.
Pre-trained weights are available at
[arcee-ai/AFM-4.5B](https://huggingface.co/arcee-ai/AFM-4.5B)
and were used to build the examples below.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Arcee model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ArceeModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 18432):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. AFM-4.5B-Base supports up to 16384 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 128000):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 128001):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import ArceeModel, ArceeConfig
>>> # Initializing an Arcee AFM-4.5B-Base style configuration
>>> configuration = ArceeConfig()
>>> # Initializing a model from the AFM-4.5B-Base style configuration
>>> model = ArceeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "arcee"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 2560,
intermediate_size: Optional[int] = 18432,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "relu2",
max_position_embeddings: Optional[int] = 4096,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 128000,
eos_token_id: Optional[int] = 128001,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
head_dim: Optional[int] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["ArceeConfig"]
|
ArceeConfig
|
python
|
numpy__numpy
|
numpy/distutils/system_info.py
|
{
"start": 53534,
"end": 54460
}
|
class ____(atlas_info):
_lib_names = ['f77blas', 'cblas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
opt = self.get_option_single('atlas_libs', 'libraries')
atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
|
atlas_blas_info
|
python
|
doocs__leetcode
|
solution/0100-0199/0130.Surrounded Regions/Solution2.py
|
{
"start": 0,
"end": 905
}
|
class ____:
def solve(self, board: List[List[str]]) -> None:
def find(x: int) -> int:
if p[x] != x:
p[x] = find(p[x])
return p[x]
m, n = len(board), len(board[0])
p = list(range(m * n + 1))
for i in range(m):
for j in range(n):
if board[i][j] == "O":
if i in (0, m - 1) or j in (0, n - 1):
p[find(i * n + j)] = find(m * n)
else:
for a, b in pairwise((-1, 0, 1, 0, -1)):
x, y = i + a, j + b
if board[x][y] == "O":
p[find(x * n + y)] = find(i * n + j)
for i in range(m):
for j in range(n):
if board[i][j] == "O" and find(i * n + j) != find(m * n):
board[i][j] = "X"
|
Solution
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/exception.py
|
{
"start": 632,
"end": 748
}
|
class ____(UnityException):
"""
Related to errors with sending actions.
"""
pass
|
UnityActionException
|
python
|
huggingface__transformers
|
src/transformers/models/grounding_dino/processing_grounding_dino.py
|
{
"start": 2826,
"end": 3433
}
|
class ____(dict):
message = (
"The key `labels` is will return integer ids in `GroundingDinoProcessor.post_process_grounded_object_detection` "
"output since v4.51.0. Use `text_labels` instead to retrieve string object names."
)
def __getitem__(self, key):
if key == "labels":
warnings.warn(self.message, FutureWarning)
return super().__getitem__(key)
def get(self, key, *args, **kwargs):
if key == "labels":
warnings.warn(self.message, FutureWarning)
return super().get(key, *args, **kwargs)
|
DictWithDeprecationWarning
|
python
|
walkccc__LeetCode
|
solutions/2970. Count the Number of Incremovable Subarrays I/2970.py
|
{
"start": 0,
"end": 1149
}
|
class ____:
def incremovableSubarrayCount(self, nums: list[int]) -> int:
n = len(nums)
startIndex = self._getStartIndexOfSuffix(nums)
# If the complete array is strictly increasing, the total number of ways we
# can remove elements equals the total number of possible subarrays.
if startIndex == 0:
return n * (n + 1) // 2
# The valid removals starting from nums[0] include nums[0..startIndex - 1],
# nums[0..startIndex], ..., nums[0..n).
ans = n - startIndex + 1
# Enumerate each prefix subarray that is strictly increasing.
for i in range(startIndex):
if i > 0 and nums[i] <= nums[i - 1]:
break
# Since nums[0..i] is strictly increasing, find the first index j in
# nums[startIndex..n) such that nums[j] > nums[i]. The valid removals
# will then be nums[i + 1..j - 1], nums[i + 1..j], ..., nums[i + 1..n).
ans += n - bisect.bisect_right(nums, nums[i], startIndex) + 1
return ans
def _getStartIndexOfSuffix(self, nums: list[int]) -> int:
for i in range(len(nums) - 2, -1, -1):
if nums[i] >= nums[i + 1]:
return i + 1
return 0
|
Solution
|
python
|
ray-project__ray
|
python/ray/serve/_private/request_router/common.py
|
{
"start": 1812,
"end": 1890
}
|
class ____:
queue_len: int
timestamp: float
|
ReplicaQueueLengthCacheEntry
|
python
|
walkccc__LeetCode
|
solutions/806. Number of Lines To Write String/806.py
|
{
"start": 0,
"end": 346
}
|
class ____:
def numberOfLines(self, widths: list[int], s: str) -> list[int]:
numLines = 1
runningWidth = 0
for c in s:
width = widths[ord(c) - ord('a')]
if runningWidth + width <= 100:
runningWidth += width
else:
numLines += 1
runningWidth = width
return [numLines, runningWidth]
|
Solution
|
python
|
great-expectations__great_expectations
|
great_expectations/checkpoint/actions.py
|
{
"start": 2824,
"end": 3743
}
|
class ____:
"""
Shared context for all Actions in a Checkpoint run.
Note that order matters in the Action list, as the context is updated with each Action's result.
"""
def __init__(self) -> None:
self._data: list[tuple[ValidationAction, dict]] = []
@property
def data(self) -> list[tuple[ValidationAction, dict]]:
return self._data
def update(self, action: ValidationAction, action_result: dict) -> None:
self._data.append((action, action_result))
@public_api
def filter_results(self, class_: Type[ValidationAction]) -> list[dict]:
"""
Filter the results of the actions in the context by class.
Args:
class_: The class to filter by.
Returns:
A list of action results.
"""
return [action_result for action, action_result in self._data if isinstance(action, class_)]
|
ActionContext
|
python
|
walkccc__LeetCode
|
solutions/2430. Maximum Deletions on a String/2430.py
|
{
"start": 0,
"end": 509
}
|
class ____:
def deleteString(self, s: str) -> int:
n = len(s)
# lcs[i][j] := the number of the same letters of s[i..n) and s[j..n)
lcs = [[0] * (n + 1) for _ in range(n + 1)]
# dp[i] := the maximum number of operations needed to delete s[i..n)
dp = [1] * n
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
if s[i] == s[j]:
lcs[i][j] = lcs[i + 1][j + 1] + 1
if lcs[i][j] >= j - i:
dp[i] = max(dp[i], dp[j] + 1)
return dp[0]
|
Solution
|
python
|
modin-project__modin
|
modin/core/dataframe/algebra/default2pandas/rolling.py
|
{
"start": 882,
"end": 2474
}
|
class ____(DefaultMethod):
"""Builder for default-to-pandas aggregation on a rolling window functions."""
OBJECT_TYPE = "Rolling"
@classmethod
def _build_rolling(cls, func):
"""
Build function that creates a rolling window and executes `func` on it.
Parameters
----------
func : callable
Function to execute on a rolling window.
Returns
-------
callable
Function that takes pandas DataFrame and applies `func` on a rolling window.
"""
def fn(df, rolling_kwargs, *args, **kwargs):
"""Create rolling window for the passed frame and execute specified `func` on it."""
roller = df.rolling(**rolling_kwargs)
if type(func) is property:
return func.fget(roller)
return func(roller, *args, **kwargs)
return fn
@classmethod
def register(cls, func, **kwargs):
"""
Build function that do fallback to pandas to apply `func` on a rolling window.
Parameters
----------
func : callable
Function to execute on a rolling window.
**kwargs : kwargs
Additional arguments that will be passed to function builder.
Returns
-------
callable
Function that takes query compiler and defaults to pandas to apply aggregation
`func` on a rolling window.
"""
return super().register(
cls._build_rolling(func), fn_name=func.__name__, **kwargs
)
|
RollingDefault
|
python
|
huggingface__transformers
|
src/transformers/models/mobilebert/modeling_mobilebert.py
|
{
"start": 21613,
"end": 21942
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MobileBertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
MobileBertOnlyMLMHead
|
python
|
scrapy__scrapy
|
tests/test_spidermiddleware.py
|
{
"start": 8191,
"end": 10134
}
|
class ____(TestBaseAsyncSpiderMiddleware):
"""process_spider_output tests for simple callbacks"""
ITEM_TYPE = dict
MW_SIMPLE = ProcessSpiderOutputSimpleMiddleware
MW_ASYNCGEN = ProcessSpiderOutputAsyncGenMiddleware
MW_UNIVERSAL = ProcessSpiderOutputUniversalMiddleware
@deferred_f_from_coro_f
async def test_simple(self):
"""Simple mw"""
await self._test_simple_base(self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_asyncgen(self):
"""Asyncgen mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN)
@deferred_f_from_coro_f
async def test_simple_asyncgen(self):
"""Simple mw -> asyncgen mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_asyncgen_simple(self):
"""Asyncgen mw -> simple mw; upgrade then downgrade"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_ASYNCGEN, downgrade=True)
@deferred_f_from_coro_f
async def test_universal(self):
"""Universal mw"""
await self._test_simple_base(self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_universal_simple(self):
"""Universal mw -> simple mw"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_simple_universal(self):
"""Simple mw -> universal mw"""
await self._test_simple_base(self.MW_UNIVERSAL, self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_universal_asyncgen(self):
"""Universal mw -> asyncgen mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_asyncgen_universal(self):
"""Asyncgen mw -> universal mw; upgrade"""
await self._test_asyncgen_base(self.MW_UNIVERSAL, self.MW_ASYNCGEN)
|
TestProcessSpiderOutputSimple
|
python
|
getsentry__sentry
|
tests/sentry/dynamic_sampling/tasks/test_tasks.py
|
{
"start": 1774,
"end": 3932
}
|
class ____(BaseMetricsLayerTestCase, TestCase, SnubaTestCase):
@staticmethod
def old_date():
return timezone.now() - timedelta(minutes=NEW_MODEL_THRESHOLD_IN_MINUTES + 1)
@staticmethod
def disable_all_biases(project):
project.update_option(
"sentry:dynamic_sampling_biases",
[
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.IGNORE_HEALTH_CHECKS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": False},
{"id": RuleType.BOOST_KEY_TRANSACTIONS_RULE.value, "active": False},
{"id": RuleType.BOOST_LOW_VOLUME_TRANSACTIONS_RULE.value, "active": False},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": False},
],
)
def create_old_organization(self, name):
return self.create_organization(name=name, date_added=self.old_date())
def create_old_project(self, name, organization):
return self.create_project(name=name, organization=organization, date_added=self.old_date())
def create_project_and_add_metrics(self, name, count, org, tags=None, is_old=True):
if tags is None:
tags = {"transaction": "foo_transaction"}
if is_old:
proj = self.create_old_project(name=name, organization=org)
else:
proj = self.create_project(name=name, organization=org)
self.disable_all_biases(project=proj)
self.store_performance_metric(
name=TransactionMRI.COUNT_PER_ROOT_PROJECT.value,
tags=tags,
minutes_before_now=30,
value=count,
project_id=proj.id,
org_id=org.id,
)
return proj
def create_project_without_metrics(self, name, org, is_old=True):
if is_old:
proj = self.create_old_project(name=name, organization=org)
else:
proj = self.create_project(name=name, organization=org)
self.disable_all_biases(project=proj)
return proj
@freeze_time(MOCK_DATETIME)
|
TasksTestCase
|
python
|
langchain-ai__langchain
|
libs/langchain/tests/unit_tests/runnables/test_openai_functions.py
|
{
"start": 490,
"end": 3007
}
|
class ____(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
@override
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
},
},
),
),
],
)
def test_openai_functions_router(
snapshot: SnapshotAssertion,
mocker: MockerFixture,
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!",
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|
FakeChatOpenAI
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/memory/vectorstore.py
|
{
"start": 604,
"end": 4247
}
|
class ____(BaseMemory):
"""Vector Store Retriever Memory.
Store the conversation history in a vector store and retrieves the relevant
parts of past conversation based on the input.
"""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history"
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: str | None = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
exclude_input_keys: Sequence[str] = Field(default_factory=tuple)
"""Input keys to exclude in addition to memory key when constructing the document"""
@property
def memory_variables(self) -> list[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _documents_to_memory_variables(
self,
docs: list[Document],
) -> dict[str, list[Document] | str]:
result: list[Document] | str
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def load_memory_variables(
self,
inputs: dict[str, Any],
) -> dict[str, list[Document] | str]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.invoke(query)
return self._documents_to_memory_variables(docs)
async def aload_memory_variables(
self,
inputs: dict[str, Any],
) -> dict[str, list[Document] | str]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = await self.retriever.ainvoke(query)
return self._documents_to_memory_variables(docs)
def _form_documents(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> list[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
await self.retriever.aadd_documents(documents)
def clear(self) -> None:
"""Nothing to clear."""
async def aclear(self) -> None:
"""Nothing to clear."""
|
VectorStoreRetrieverMemory
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/lang/special_functions_test.py
|
{
"start": 1055,
"end": 4357
}
|
class ____(test.TestCase):
def test_match_staging_level(self):
some_tensor = constant_op.constant(0)
tensor_one = special_functions.match_staging_level(1, some_tensor)
python_one = special_functions.match_staging_level(1, 1)
with self.cached_session() as sess:
self.assertTrue(tensor_util.is_tf_type(tensor_one))
self.assertAllEqual(self.evaluate(tensor_one), 1)
self.assertEqual(python_one, 1)
def test_tensor_list_empty_list(self):
l = special_functions.tensor_list([],
element_dtype=dtypes.int32,
element_shape=())
sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(sl), [])
l = special_functions.tensor_list((),
element_dtype=dtypes.int32,
element_shape=())
sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(sl), [])
def test_tensor_list_tensor(self):
l = special_functions.tensor_list(
constant_op.constant([], dtype=dtypes.int32))
sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(sl), [])
def test_tensor_list_unsupported_initializer(self):
with self.assertRaisesRegex(ValueError, 'unknown type'):
special_functions.tensor_list(np.array([1, 2, 3]))
def test_tensor_list_empty_list_no_type(self):
with self.assertRaisesRegex(ValueError,
'element_dtype and element_shape are required'):
special_functions.tensor_list([])
def test_tensor_list_from_elements(self):
elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]
l = special_functions.tensor_list(elements)
sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(sl), [[1, 2], [3, 4]])
def test_tensor_list_array_from_elements(self):
elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]
l = special_functions.tensor_list(elements, use_tensor_array=True)
sl = l.stack()
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(sl), [[1, 2], [3, 4]])
def test_stack(self):
self.assertEqual(special_functions.stack(1, strict=False), 1)
self.assertListEqual(
special_functions.stack([1, 2, 3], strict=False), [1, 2, 3])
# TODO(mdan): This should probably forward to tf.stack.
self.assertTrue(
isinstance(
special_functions.stack(
[constant_op.constant(1),
constant_op.constant(2)], strict=False), list))
with self.assertRaises(ValueError):
special_functions.stack([1, 2, 3])
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(
t, element_shape=constant_op.constant([], dtype=dtypes.int32))
self.assertTrue(
tensor_util.is_tf_type(
special_functions.stack(l, element_dtype=dtypes.float32)))
if __name__ == '__main__':
test.main()
|
SpecialFunctionsTest
|
python
|
Pylons__pyramid
|
src/pyramid/interfaces.py
|
{
"start": 955,
"end": 1231
}
|
class ____(Interface):
"""An event type that is emitted whenever :app:`Pyramid`
begins to process a new request. See the documentation attached
to :class:`pyramid.events.NewRequest` for more information."""
request = Attribute('The request object')
|
INewRequest
|
python
|
docker__docker-py
|
tests/integration/models_images_test.py
|
{
"start": 4964,
"end": 5807
}
|
class ____(BaseIntegrationTest):
def test_tag_and_remove(self):
repo = 'dockersdk.tests.images.test_tag'
tag = 'some-tag'
identifier = f'{repo}:{tag}'
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
result = image.tag(repo, tag)
assert result is True
self.tmp_imgs.append(identifier)
assert image.id in get_ids(client.images.list(repo))
assert image.id in get_ids(client.images.list(identifier))
client.images.remove(identifier)
assert image.id not in get_ids(client.images.list(repo))
assert image.id not in get_ids(client.images.list(identifier))
assert image.id in get_ids(client.images.list('alpine:latest'))
def get_ids(images):
return [i.id for i in images]
|
ImageTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-distance-between-unequal-words-in-array-i.py
|
{
"start": 41,
"end": 332
}
|
class ____(object):
def maxDistance(self, words):
"""
:type words: List[str]
:rtype: int
"""
for i in xrange(len(words)//2+1):
if words[~i] != words[0] or words[i] != words[-1]:
return len(words)-i
return 0
|
Solution
|
python
|
pydata__xarray
|
xarray/groupers.py
|
{
"start": 31006,
"end": 39714
}
|
class ____(Resampler):
"""Allows grouping using a custom definition of seasons.
Parameters
----------
seasons: Sequence[str]
An ordered list of seasons.
drop_incomplete: bool
Whether to drop seasons that are not completely included in the data.
For example, if a time series starts in Jan-2001, and seasons includes `"DJF"`
then observations from Jan-2001, and Feb-2001 are ignored in the grouping
since Dec-2000 isn't present.
Examples
--------
>>> SeasonResampler(["JF", "MAM", "JJAS", "OND"])
SeasonResampler(seasons=['JF', 'MAM', 'JJAS', 'OND'], drop_incomplete=True)
>>> SeasonResampler(["DJFM", "AM", "JJA", "SON"])
SeasonResampler(seasons=['DJFM', 'AM', 'JJA', 'SON'], drop_incomplete=True)
"""
seasons: Sequence[str]
drop_incomplete: bool = field(default=True, kw_only=True)
season_inds: Sequence[Sequence[int]] = field(init=False, repr=False)
season_tuples: Mapping[str, Sequence[int]] = field(init=False, repr=False)
def __post_init__(self):
self.season_inds = season_to_month_tuple(self.seasons)
all_inds = functools.reduce(operator.add, self.season_inds)
if len(all_inds) > len(set(all_inds)):
raise ValueError(
f"Overlapping seasons are not allowed. Received {self.seasons!r}"
)
self.season_tuples = dict(zip(self.seasons, self.season_inds, strict=True))
if not is_sorted_periodic(list(itertools.chain(*self.season_inds))):
raise ValueError(
"Resampling is only supported with sorted seasons. "
f"Provided seasons {self.seasons!r} are not sorted."
)
def factorize(self, group: T_Group) -> EncodedGroups:
if group.ndim != 1:
raise ValueError(
"SeasonResampler can only be used to resample by 1D arrays."
)
if not isinstance(group, DataArray) or not _contains_datetime_like_objects(
group.variable
):
raise ValueError(
"SeasonResampler can only be used to group by datetime-like DataArrays."
)
seasons = self.seasons
season_inds = self.season_inds
season_tuples = self.season_tuples
nstr = max(len(s) for s in seasons)
year = group.dt.year.astype(int)
month = group.dt.month.astype(int)
season_label = np.full(group.shape, "", dtype=f"U{nstr}")
# offset years for seasons with December and January
for season_str, season_ind in zip(seasons, season_inds, strict=True):
season_label[month.isin(season_ind)] = season_str
if "DJ" in season_str:
after_dec = season_ind[season_str.index("D") + 1 :]
# important: this is assuming non-overlapping seasons
year[month.isin(after_dec)] -= 1
# Allow users to skip one or more months?
# present_seasons is a mask that is True for months that are requested in the output
present_seasons = season_label != ""
if present_seasons.all():
# avoid copies if we can.
present_seasons = slice(None)
frame = pd.DataFrame(
data={
"index": np.arange(group[present_seasons].size),
"month": month[present_seasons],
},
index=pd.MultiIndex.from_arrays(
[year.data[present_seasons], season_label[present_seasons]],
names=["year", "season"],
),
)
agged = (
frame["index"]
.groupby(["year", "season"], sort=False)
.agg(["first", "count"])
)
first_items = agged["first"]
counts = agged["count"]
index_class: type[CFTimeIndex | pd.DatetimeIndex]
if _contains_cftime_datetimes(group.data):
index_class = CFTimeIndex
datetime_class = type(first_n_items(group.data, 1).item())
else:
index_class = pd.DatetimeIndex
datetime_class = datetime.datetime
# these are the seasons that are present
unique_coord = index_class(
[
datetime_class(year=year, month=season_tuples[season][0], day=1)
for year, season in first_items.index
]
)
# This sorted call is a hack. It's hard to figure out how
# to start the iteration for arbitrary season ordering
# for example "DJF" as first entry or last entry
# So we construct the largest possible index and slice it to the
# range present in the data.
complete_index = index_class(
sorted(
[
datetime_class(year=y, month=m, day=1)
for y, m in itertools.product(
range(year[0].item(), year[-1].item() + 1),
[s[0] for s in season_inds],
)
]
)
)
# all years and seasons
def get_label(year, season):
month, *_ = season_tuples[season]
return f"{year}-{month:02d}-01"
unique_codes = np.arange(len(unique_coord))
valid_season_mask = season_label != ""
first_valid_season, last_valid_season = season_label[valid_season_mask][[0, -1]]
first_year, last_year = year.data[[0, -1]]
if self.drop_incomplete:
if month.data[valid_season_mask][0] != season_tuples[first_valid_season][0]:
if "DJ" in first_valid_season:
first_year += 1
first_valid_season = seasons[
(seasons.index(first_valid_season) + 1) % len(seasons)
]
unique_codes -= 1
if (
month.data[valid_season_mask][-1]
!= season_tuples[last_valid_season][-1]
):
last_valid_season = seasons[seasons.index(last_valid_season) - 1]
if "DJ" in last_valid_season:
last_year -= 1
unique_codes[-1] = -1
first_label = get_label(first_year, first_valid_season)
last_label = get_label(last_year, last_valid_season)
slicer = complete_index.slice_indexer(first_label, last_label)
full_index = complete_index[slicer]
final_codes = np.full(group.data.size, -1)
final_codes[present_seasons] = np.repeat(unique_codes, counts)
codes = group.copy(data=final_codes, deep=False)
return EncodedGroups(codes=codes, full_index=full_index)
def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]:
"""
Compute chunk sizes for this season resampler.
This method is used during chunking operations to determine appropriate
chunk sizes for the given variable when using this resampler.
Parameters
----------
name : Hashable
The name of the dimension being chunked.
variable : Variable
The variable being chunked.
Returns
-------
tuple[int, ...]
A tuple of chunk sizes for the dimension.
"""
if not _contains_datetime_like_objects(variable):
raise ValueError(
f"Computing chunks with {type(self)!r} only supported for datetime variables. "
f"Received variable with dtype {variable.dtype!r} instead."
)
if len("".join(self.seasons)) != 12:
raise ValueError(
"Cannot rechunk with a SeasonResampler that does not cover all 12 months. "
f"Received `seasons={self.seasons!r}`."
)
# Create a temporary resampler that ignores drop_incomplete for chunking
# This prevents data from being silently dropped during chunking
resampler_for_chunking = type(self)(seasons=self.seasons, drop_incomplete=False)
chunks = (
DataArray(
np.ones(variable.shape, dtype=int),
dims=(dim,),
coords={dim: variable},
)
.resample({dim: resampler_for_chunking})
.sum()
)
# When bins (binning) or time periods are missing (resampling)
# we can end up with NaNs. Drop them.
if chunks.dtype.kind == "f":
chunks = chunks.dropna(dim).astype(int)
chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist())
return chunks_tuple
def reset(self) -> Self:
return type(self)(seasons=self.seasons, drop_incomplete=self.drop_incomplete)
|
SeasonResampler
|
python
|
apache__airflow
|
providers/apache/kafka/tests/integration/apache/kafka/hooks/test_admin_client.py
|
{
"start": 1108,
"end": 1837
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="kafka_d",
conn_type="kafka",
extra=json.dumps(client_config),
)
)
def test_hook(self):
"""test the creation of topics"""
# Standard Init
hook = KafkaAdminClientHook(kafka_config_id="kafka_d")
hook.create_topic(topics=[("test_1", 1, 1), ("test_2", 1, 1)])
kadmin = hook.get_conn
t = kadmin.list_topics(timeout=10).topics
assert t.get("test_2")
hook.delete_topic(topics=["test_1", "test_2"])
|
TestKafkaAdminClientHook
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_itertools.py
|
{
"start": 110485,
"end": 111089
}
|
class ____(__TestCase):
def test_repeat(self):
self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
self.assertEqual(operator.length_hint(repeat(None), 12), 12)
def test_repeat_with_negative_times(self):
self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
|
LengthTransparency
|
python
|
huggingface__transformers
|
src/transformers/models/afmoe/modeling_afmoe.py
|
{
"start": 5904,
"end": 6651
}
|
class ____(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
AfmoeMLP
|
python
|
great-expectations__great_expectations
|
great_expectations/core/configuration.py
|
{
"start": 416,
"end": 1237
}
|
class ____(ABC, SerializableDictDot):
"""Abstract base class for Config objects. Sets the fields that must be included on a Config."""
def __init__(self, id: Optional[str] = None, name: Optional[str] = None) -> None:
self.id = id
self.name = name
super().__init__()
@override
def __repr__(self) -> str:
return pf(self.to_dict(), indent=2, sort_dicts=True)
@classmethod
def _dict_round_trip(cls, schema: Schema, target: dict) -> dict:
"""
Round trip a dictionary with a schema so that validation and serialization logic is applied.
Example: Loading a config with a `id_` field but serializing it as `id`.
"""
_loaded = schema.load(target)
_config = cls(**_loaded)
return _config.to_json_dict()
|
AbstractConfig
|
python
|
python-excel__xlwt
|
tests/test_easyxf.py
|
{
"start": 841,
"end": 2255
}
|
class ____(unittest.TestCase):
def create_example_xls(self, filename):
mkd = datetime.date
hdngs = ['Date', 'Stock Code', 'Quantity', 'Unit Price', 'Value', 'Message']
kinds = 'date text int price money text'.split()
data = [
[mkd(2007, 7, 1), 'ABC', 1000, 1.234567, 1234.57, ''],
[mkd(2007, 12, 31), 'XYZ', -100, 4.654321, -465.43, 'Goods returned'],
] + [
[mkd(2008, 6, 30), 'PQRCD', 100, 2.345678, 234.57, ''],
] * 100
heading_xf = ezxf('font: bold on; align: wrap on, vert centre, horiz center')
kind_to_xf_map = {
'date': ezxf(num_format_str='yyyy-mm-dd'),
'int': ezxf(num_format_str='#,##0'),
'money': ezxf('font: italic on; pattern: pattern solid, fore-colour grey25',
num_format_str='$#,##0.00'),
'price': ezxf(num_format_str='#0.000000'),
'text': ezxf(),
}
data_xfs = [kind_to_xf_map[k] for k in kinds]
write_xls(filename, 'Demo', hdngs, data, heading_xf, data_xfs)
def test_example_xls(self):
self.create_example_xls(in_tst_output_dir(EXAMPLE_XLS))
self.assertTrue(filecmp.cmp(in_tst_dir(EXAMPLE_XLS),
in_tst_output_dir(EXAMPLE_XLS),
shallow=False))
|
TestUnicode0
|
python
|
justquick__django-activity-stream
|
actstream/templatetags/activity_tags.py
|
{
"start": 2999,
"end": 3861
}
|
class ____(AsNode):
def render_result(self, context):
action_instance = context['action'] = self.args[0].resolve(context)
templates = [
'actstream/%s/action.html' % action_instance.verb.replace(' ', '_'),
'actstream/action.html',
]
return render_to_string(templates, context.flatten())
def display_action(parser, token):
"""
Renders the template for the action description
::
{% display_action action %}
"""
return DisplayAction.handle_token(parser, token)
def is_following(user, actor):
"""
Returns true if the given user is following the actor
::
{% if request.user|is_following:another_user %}
You are already following {{ another_user }}
{% endif %}
"""
return Follow.objects.is_following(user, actor)
|
DisplayAction
|
python
|
bokeh__bokeh
|
src/bokeh/models/axes.py
|
{
"start": 9552,
"end": 12262
}
|
class ____(Axis):
''' An axis that displays ticks and labels for categorical ranges.
The ``CategoricalAxis`` can handle factor ranges with up to two levels of
nesting, including drawing a separator line between top-level groups of
factors.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
ticker = Override(default=InstanceDefault(CategoricalTicker))
formatter = Override(default=InstanceDefault(CategoricalTickFormatter))
separator_props = Include(ScalarLineProps, prefix="separator", help="""
The {prop} of the separator line between top-level categorical groups.
This property always applies to factors in the outermost level of nesting.
""")
separator_line_color = Override(default="lightgrey")
separator_line_width = Override(default=2)
group_props = Include(ScalarTextProps, prefix="group", help="""
The {prop} of the group categorical labels.
This property always applies to factors in the outermost level of nesting.
If the list of categorical factors is flat (i.e. no nesting) then this
property has no effect.
""")
group_label_orientation = Either(Enum(LabelOrientation), Float, default="parallel", help="""
What direction the group label text should be oriented.
If a number is supplied, the angle of the text is measured from horizontal.
This property always applies to factors in the outermost level of nesting.
If the list of categorical factors is flat (i.e. no nesting) then this
property has no effect.
""")
group_text_font_size = Override(default="11px")
group_text_font_style = Override(default="bold")
group_text_color = Override(default="grey")
subgroup_props = Include(ScalarTextProps, prefix="subgroup", help="""
The {prop} of the subgroup categorical labels.
This property always applies to factors in the middle level of nesting.
If the list of categorical factors is has only zero or one levels of nesting,
then this property has no effect.
""")
subgroup_label_orientation = Either(Enum(LabelOrientation), Float, default="parallel", help="""
What direction the subgroup label text should be oriented.
If a number is supplied, the angle of the text is measured from horizontal.
This property always applies to factors in the middle level of nesting.
If the list of categorical factors is has only zero or one levels of nesting,
then this property has no effect.
""")
subgroup_text_font_size = Override(default="11px")
subgroup_text_font_style = Override(default="bold")
|
CategoricalAxis
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 207600,
"end": 215799
}
|
class ____(
DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefGradientstringnull
):
"""
FillDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fill"
@overload
def bandPosition(self, _: float, /) -> FillDatum: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> FillDatum: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
) -> FillDatum: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefGradientstringnullExprRef], /
) -> FillDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> FillDatum: ...
@overload
def type(self, _: Type_T, /) -> FillDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
condition=condition,
title=title,
type=type,
**kwds,
)
@with_property_setters
|
FillDatum
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_cuda_devicerecord.py
|
{
"start": 3502,
"end": 3808
}
|
class ____(TestCudaDeviceRecord):
"""
Tests the DeviceRecord class with np.record host types
"""
def setUp(self):
CUDATestCase.setUp(self)
self._create_data(np.recarray)
@skip_on_cudasim('Structured array attr access not supported in simulator')
|
TestCudaDeviceRecordWithRecord
|
python
|
ray-project__ray
|
python/ray/_private/external_storage.py
|
{
"start": 21417,
"end": 24748
}
|
class ____(FileSystemStorage):
"""This class is for testing slow object spilling."""
def __init__(self, node_id: str, **kwargs):
super().__init__(node_id, **kwargs)
self._min_delay = 1
self._max_delay = 2
def spill_objects(self, object_refs, owner_addresses) -> List[str]:
delay = random.random() * (self._max_delay - self._min_delay) + self._min_delay
time.sleep(delay)
return super().spill_objects(object_refs, owner_addresses)
def setup_external_storage(config, node_id, session_name):
"""Setup the external storage according to the config."""
assert node_id is not None, "node_id should be provided."
global _external_storage
if config:
storage_type = config["type"]
if storage_type == "filesystem":
_external_storage = FileSystemStorage(node_id, **config["params"])
elif storage_type == "smart_open":
_external_storage = ExternalStorageSmartOpenImpl(
node_id, **config["params"]
)
elif storage_type == "mock_distributed_fs":
# This storage is used to unit test distributed external storages.
# TODO(sang): Delete it after introducing the mock S3 test.
_external_storage = FileSystemStorage(node_id, **config["params"])
elif storage_type == "unstable_fs":
# This storage is used to unit test unstable file system for fault
# tolerance.
_external_storage = UnstableFileStorage(node_id, **config["params"])
elif storage_type == "slow_fs":
# This storage is used to unit test slow filesystems.
_external_storage = SlowFileStorage(node_id, **config["params"])
else:
raise ValueError(f"Unknown external storage type: {storage_type}")
else:
_external_storage = NullStorage()
return _external_storage
def reset_external_storage():
global _external_storage
_external_storage = NullStorage()
def spill_objects(object_refs, owner_addresses):
"""Spill objects to the external storage. Objects are specified
by their object refs.
Args:
object_refs: The list of the refs of the objects to be spilled.
owner_addresses: The owner addresses of the provided object refs.
Returns:
A list of keys corresponding to the input object refs.
"""
return _external_storage.spill_objects(object_refs, owner_addresses)
def restore_spilled_objects(
object_refs: List[ObjectRef], url_with_offset_list: List[str]
):
"""Restore objects from the external storage.
Args:
object_refs: List of object IDs (note that it is not ref).
url_with_offset_list: List of url_with_offset.
"""
return _external_storage.restore_spilled_objects(object_refs, url_with_offset_list)
def delete_spilled_objects(urls: List[str]):
"""Delete objects that are spilled to the external storage.
Args:
urls: URLs that store spilled object files.
"""
_external_storage.delete_spilled_objects(urls)
def _get_unique_spill_filename(object_refs: List[ObjectRef]):
"""Generate a unqiue spill file name.
Args:
object_refs: objects to be spilled in this file.
"""
return f"{uuid.uuid4().hex}-multi-{len(object_refs)}"
|
SlowFileStorage
|
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 12694,
"end": 12982
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(10, 10)
self.mode = "some_string"
def forward(self, x):
if self.mode == "some_string":
return F.relu(self.linear1(x))
|
StringMember
|
python
|
modin-project__modin
|
asv_bench/benchmarks/benchmarks.py
|
{
"start": 37184,
"end": 37610
}
|
class ____:
params = [get_benchmark_shapes("TimeReplace")]
param_names = ["shape"]
def setup(self, shape):
rows, cols = shape
self.to_replace = {i: getattr(IMPL, "Timestamp")(i) for i in range(rows)}
self.df = IMPL.DataFrame(np.random.randint(rows, size=(rows, cols)))
execute(self.df)
def time_replace(self, shape):
execute(self.df.replace(self.to_replace))
|
TimeReplace
|
python
|
great-expectations__great_expectations
|
scripts/cleanup/cleanup_snowflake.py
|
{
"start": 299,
"end": 2220
}
|
class ____(BaseSettings):
"""Environment variables for Snowflake connection.
These are injected in via CI, but when running locally, you may use your own credentials.
"""
SNOWFLAKE_CI_USER_PASSWORD: str
SNOWFLAKE_CI_ACCOUNT: str
@property
def connection_string(self) -> str:
return (
f"snowflake://ci:{self.SNOWFLAKE_CI_USER_PASSWORD}@oca29081.us-east-1/ci?"
f"warehouse=ci&role=ci"
)
# Regex to match uppercase schema names
# (Snowflake converts unquoted identifiers to uppercase)
SCHEMA_PATTERN_TEST = "^TEST_[A-Z]{10}$" # General SQL testing framework
SCHEMA_PATTERN_PY_VERSION = "^PY3[0-9]{1,2}_I[A-F0-9]{32}$" # Python version-specific test schemas
SCHEMA_FORMAT = f"{SCHEMA_PATTERN_TEST}|{SCHEMA_PATTERN_PY_VERSION}"
def cleanup_snowflake(config: SnowflakeConnectionConfig) -> None:
engine = create_engine(url=config.connection_string)
with engine.connect() as conn, conn.begin():
results = conn.execute(
TextClause(
f"SELECT 'DROP SCHEMA IF EXISTS ' || schema_name || ' CASCADE;' as drop_statement "
f"FROM INFORMATION_SCHEMA.SCHEMATA "
f"WHERE REGEXP_LIKE(schema_name, '{SCHEMA_FORMAT}') "
f"AND created < DATEADD(hour, -1, CURRENT_TIMESTAMP())"
)
).fetchall()
if results:
for row in results:
drop_statement = row[0]
logger.info(f"Executing: {drop_statement}")
conn.execute(TextClause(drop_statement))
logger.info(f"Cleaned up {len(results)} Snowflake schema(s)")
else:
logger.info("No Snowflake schemas to clean up!")
engine.dispose()
if __name__ == "__main__":
config = SnowflakeConnectionConfig() # type: ignore[call-arg] # pydantic populates from env vars
cleanup_snowflake(config)
|
SnowflakeConnectionConfig
|
python
|
getsentry__sentry
|
tests/sentry/db/models/fields/bitfield/test_bitfield.py
|
{
"start": 13697,
"end": 14343
}
|
class ____(unittest.TestCase):
def test_can_unserialize_bithandler(self) -> None:
bf = BitFieldTestModel()
bf.flags.FLAG_0 = True
bf.flags.FLAG_1 = False
data = pickle.dumps(bf)
inst = pickle.loads(data)
self.assertTrue(inst.flags.FLAG_0)
self.assertFalse(inst.flags.FLAG_1)
def test_added_field(self) -> None:
bf = BitFieldTestModel()
bf.flags.FLAG_0 = True
bf.flags.FLAG_1 = False
bf.flags.FLAG_3 = False
data = pickle.dumps(bf)
inst = pickle.loads(data)
self.assertTrue("FLAG_3" in inst.flags.keys())
|
BitFieldSerializationTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-operations-to-maximize-last-elements-in-arrays.py
|
{
"start": 61,
"end": 688
}
|
class ____(object):
def minOperations(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
cnt = [0]*2
for x, y in itertools.izip(nums1, nums2):
if not (min(x, y) <= min(nums1[-1], nums2[-1]) and max(x, y) <= max(nums1[-1], nums2[-1])):
return -1
if not (x <= nums1[-1] and y <= nums2[-1]):
cnt[0] += 1
if not (x <= nums2[-1] and y <= nums1[-1]):
cnt[1] += 1
return min(cnt)
# Time: O(n)
# Space: O(1)
import itertools
# simulation
|
Solution
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/views.py
|
{
"start": 23127,
"end": 23784
}
|
class ____(
APIv3Settings,
GenericViewSet,
):
# TODO: migrate code from corporate here.
# NOTE: this viewset is only useful for nested URLs required for notifications:
# /api/v3/organizations/<slug>/notifications/
# However, accessing to /api/v3/organizations/ or /api/v3/organizations/<slug>/ will return 404.
# We can implement these endpoints when we need them, tho.
# Also note that Read the Docs for Business expose this endpoint already.
model = Organization
serializer_class = OrganizationSerializer
queryset = Organization.objects.none()
permission_classes = (IsAuthenticated,)
|
OrganizationsViewSetBase
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-kyve/source_kyve/source.py
|
{
"start": 279,
"end": 1839
}
|
class ____(AbstractSource):
def check_connection(self, logger, config: Mapping[str, Any]) -> Tuple[bool, any]:
# check that pools and bundles are the same length
pools = config.get("pool_ids").split(",")
start_ids = config.get("start_ids").split(",")
if not len(pools) == len(start_ids):
return False, "Please add a start_id for every pool"
for pool_id in pools:
try:
# check if endpoint is available and returns valid data
response = requests.get(f"{config['url_base']}/kyve/query/v1beta1/pool/{pool_id}")
if not response.ok:
# todo improve error handling for cases like pool not found
return False, response.json()
except Exception as e:
return False, e
return True, None
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
streams: List[Stream] = []
pools = config.get("pool_ids").split(",")
start_ids = config.get("start_ids").split(",")
for pool_id, start_id in zip(pools, start_ids):
response = requests.get(f"{config['url_base']}/kyve/query/v1beta1/pool/{pool_id}")
pool_data = response.json().get("pool").get("data")
config_copy = dict(deepcopy(config))
config_copy["start_ids"] = int(start_id)
# add a new stream based on the pool_data
streams.append(KYVEStream(config=config_copy, pool_data=pool_data))
return streams
|
SourceKyve
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/userreport.py
|
{
"start": 1012,
"end": 3033
}
|
class ____(Serializer):
def get_attrs(self, item_list, user, **kwargs):
attrs: dict[str, _EventUser] = {}
project = Project.objects.get(id=item_list[0].project_id)
retention = quotas.backend.get_event_retention(organization=project.organization)
events = eventstore.backend.get_events(
filter=eventstore.Filter(
event_ids=[item.event_id for item in item_list],
project_ids=[project.id],
start=timezone.now() - timedelta(days=retention) if retention else None,
),
referrer="UserReportSerializer.get_attrs",
dataset=Dataset.Events,
tenant_ids={"organization_id": project.organization_id},
)
events_dict: dict[str, Event] = {event.event_id: event for event in events}
for item in item_list:
attrs[item] = {
"event_user": (
EventUser.from_event(events_dict[item.event_id])
if events_dict.get(item.event_id)
else {}
)
}
return attrs
def serialize(self, obj, attrs, user, **kwargs) -> UserReportSerializerResponse:
# TODO(dcramer): add in various context from the event
# context == user / http / extra interfaces
name = obj.name or obj.email
email = obj.email
user = None
if attrs["event_user"]:
event_user = attrs["event_user"]
if isinstance(event_user, EventUser):
name = name or event_user.name
email = email or event_user.email
user = event_user.serialize()
return {
"id": str(obj.id),
"eventID": obj.event_id,
"name": name,
"email": email,
"comments": obj.comments,
"dateCreated": obj.date_added,
"user": user,
"event": {"id": obj.event_id, "eventID": obj.event_id},
}
|
UserReportSerializer
|
python
|
django__django
|
tests/inspectdb/tests.py
|
{
"start": 21049,
"end": 27271
}
|
class ____(TransactionTestCase):
available_apps = ["inspectdb"]
def test_include_views(self):
"""inspectdb --include-views creates models for database views."""
cursor_execute(
"CREATE VIEW inspectdb_people_view AS "
"SELECT id, name FROM inspectdb_people"
)
self.addCleanup(cursor_execute, "DROP VIEW inspectdb_people_view")
out = StringIO()
view_model = "class InspectdbPeopleView(models.Model):"
view_managed = "managed = False # Created from a view."
call_command(
"inspectdb",
table_name_filter=inspectdb_views_only,
stdout=out,
)
no_views_output = out.getvalue()
self.assertNotIn(view_model, no_views_output)
self.assertNotIn(view_managed, no_views_output)
call_command(
"inspectdb",
table_name_filter=inspectdb_views_only,
include_views=True,
stdout=out,
)
with_views_output = out.getvalue()
self.assertIn(view_model, with_views_output)
self.assertIn(view_managed, with_views_output)
@skipUnlessDBFeature("can_introspect_materialized_views")
def test_include_materialized_views(self):
"""inspectdb --include-views creates models for materialized views."""
cursor_execute(
"CREATE MATERIALIZED VIEW inspectdb_people_materialized AS "
"SELECT id, name FROM inspectdb_people"
)
self.addCleanup(
cursor_execute, "DROP MATERIALIZED VIEW inspectdb_people_materialized"
)
out = StringIO()
view_model = "class InspectdbPeopleMaterialized(models.Model):"
view_managed = "managed = False # Created from a view."
call_command(
"inspectdb",
table_name_filter=inspectdb_views_only,
stdout=out,
)
no_views_output = out.getvalue()
self.assertNotIn(view_model, no_views_output)
self.assertNotIn(view_managed, no_views_output)
call_command(
"inspectdb",
table_name_filter=inspectdb_views_only,
include_views=True,
stdout=out,
)
with_views_output = out.getvalue()
self.assertIn(view_model, with_views_output)
self.assertIn(view_managed, with_views_output)
@skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL")
def test_include_partitions(self):
"""inspectdb --include-partitions creates models for partitions."""
cursor_execute(
"""
CREATE TABLE inspectdb_partition_parent (name text not null)
PARTITION BY LIST (left(upper(name), 1))
""",
"""
CREATE TABLE inspectdb_partition_child
PARTITION OF inspectdb_partition_parent
FOR VALUES IN ('A', 'B', 'C')
""",
)
self.addCleanup(
cursor_execute,
"DROP TABLE IF EXISTS inspectdb_partition_child",
"DROP TABLE IF EXISTS inspectdb_partition_parent",
)
out = StringIO()
partition_model_parent = "class InspectdbPartitionParent(models.Model):"
partition_model_child = "class InspectdbPartitionChild(models.Model):"
partition_managed = "managed = False # Created from a partition."
call_command("inspectdb", table_name_filter=inspectdb_tables_only, stdout=out)
no_partitions_output = out.getvalue()
self.assertIn(partition_model_parent, no_partitions_output)
self.assertNotIn(partition_model_child, no_partitions_output)
self.assertNotIn(partition_managed, no_partitions_output)
call_command(
"inspectdb",
table_name_filter=inspectdb_tables_only,
include_partitions=True,
stdout=out,
)
with_partitions_output = out.getvalue()
self.assertIn(partition_model_parent, with_partitions_output)
self.assertIn(partition_model_child, with_partitions_output)
self.assertIn(partition_managed, with_partitions_output)
@skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL")
def test_foreign_data_wrapper(self):
cursor_execute(
"CREATE EXTENSION IF NOT EXISTS file_fdw",
"CREATE SERVER inspectdb_server FOREIGN DATA WRAPPER file_fdw",
"""
CREATE FOREIGN TABLE inspectdb_iris_foreign_table (
petal_length real,
petal_width real,
sepal_length real,
sepal_width real
) SERVER inspectdb_server OPTIONS (
program 'echo 1,2,3,4',
format 'csv'
)
""",
)
self.addCleanup(
cursor_execute,
"DROP FOREIGN TABLE IF EXISTS inspectdb_iris_foreign_table",
"DROP SERVER IF EXISTS inspectdb_server",
"DROP EXTENSION IF EXISTS file_fdw",
)
out = StringIO()
foreign_table_model = "class InspectdbIrisForeignTable(models.Model):"
foreign_table_managed = "managed = False"
call_command(
"inspectdb",
table_name_filter=inspectdb_tables_only,
stdout=out,
)
output = out.getvalue()
self.assertIn(foreign_table_model, output)
self.assertIn(foreign_table_managed, output)
def test_composite_primary_key(self):
out = StringIO()
field_type = connection.features.introspected_field_types["IntegerField"]
call_command("inspectdb", "inspectdb_compositepkmodel", stdout=out)
output = out.getvalue()
self.assertIn(
"pk = models.CompositePrimaryKey('column_1', 'column_2')",
output,
)
self.assertIn(f"column_1 = models.{field_type}()", output)
self.assertIn(f"column_2 = models.{field_type}()", output)
def test_composite_primary_key_not_unique_together(self):
out = StringIO()
call_command("inspectdb", "inspectdb_compositepkmodel", stdout=out)
self.assertNotIn("unique_together", out.getvalue())
|
InspectDBTransactionalTests
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_socketlevel.py
|
{
"start": 5480,
"end": 12643
}
|
class ____(SocketDummyServerTestCase):
"""
Tests for client certificate support.
"""
@classmethod
def setup_class(cls) -> None:
cls.tmpdir = tempfile.mkdtemp()
ca = trustme.CA()
cert = ca.issue_cert("localhost")
encrypted_key = encrypt_key_pem(cert.private_key_pem, b"letmein")
cls.ca_path = os.path.join(cls.tmpdir, "ca.pem")
cls.cert_combined_path = os.path.join(cls.tmpdir, "server.combined.pem")
cls.cert_path = os.path.join(cls.tmpdir, "server.pem")
cls.key_path = os.path.join(cls.tmpdir, "key.pem")
cls.password_key_path = os.path.join(cls.tmpdir, "password_key.pem")
ca.cert_pem.write_to_path(cls.ca_path)
cert.private_key_and_cert_chain_pem.write_to_path(cls.cert_combined_path)
cert.cert_chain_pems[0].write_to_path(cls.cert_path)
cert.private_key_pem.write_to_path(cls.key_path)
encrypted_key.write_to_path(cls.password_key_path)
@classmethod
def teardown_class(cls) -> None:
shutil.rmtree(cls.tmpdir)
def _wrap_in_ssl(self, sock: socket.socket) -> ssl.SSLSocket:
"""
Given a single socket, wraps it in TLS.
"""
return original_ssl_wrap_socket(
sock,
ssl_version=ssl.PROTOCOL_SSLv23,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_path,
certfile=self.cert_path,
keyfile=self.key_path,
server_side=True,
)
def test_client_certs_two_files(self) -> None:
"""
Having a client cert in a separate file to its associated key works
properly.
"""
done_receiving = Event()
client_certs = []
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
sock = self._wrap_in_ssl(sock)
client_certs.append(sock.getpeercert())
data = b""
while not data.endswith(b"\r\n\r\n"):
data += sock.recv(8192)
sock.sendall(
b"HTTP/1.1 200 OK\r\n"
b"Server: testsocket\r\n"
b"Connection: close\r\n"
b"Content-Length: 6\r\n"
b"\r\n"
b"Valid!"
)
done_receiving.wait(5)
sock.close()
self._start_server(socket_handler)
with HTTPSConnectionPool(
self.host,
self.port,
cert_file=self.cert_path,
key_file=self.key_path,
cert_reqs="REQUIRED",
ca_certs=self.ca_path,
) as pool:
pool.request("GET", "/", retries=0)
done_receiving.set()
assert len(client_certs) == 1
def test_client_certs_one_file(self) -> None:
"""
Having a client cert and its associated private key in just one file
works properly.
"""
done_receiving = Event()
client_certs = []
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
sock = self._wrap_in_ssl(sock)
client_certs.append(sock.getpeercert())
data = b""
while not data.endswith(b"\r\n\r\n"):
data += sock.recv(8192)
sock.sendall(
b"HTTP/1.1 200 OK\r\n"
b"Server: testsocket\r\n"
b"Connection: close\r\n"
b"Content-Length: 6\r\n"
b"\r\n"
b"Valid!"
)
done_receiving.wait(5)
sock.close()
self._start_server(socket_handler)
with HTTPSConnectionPool(
self.host,
self.port,
cert_file=self.cert_combined_path,
cert_reqs="REQUIRED",
ca_certs=self.ca_path,
) as pool:
pool.request("GET", "/", retries=0)
done_receiving.set()
assert len(client_certs) == 1
def test_missing_client_certs_raises_error(self) -> None:
"""
Having client certs not be present causes an error.
"""
done_receiving = Event()
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
try:
self._wrap_in_ssl(sock)
except ssl.SSLError:
pass
done_receiving.wait(5)
sock.close()
self._start_server(socket_handler)
with HTTPSConnectionPool(
self.host, self.port, cert_reqs="REQUIRED", ca_certs=self.ca_path
) as pool:
with pytest.raises(MaxRetryError):
pool.request("GET", "/", retries=0)
done_receiving.set()
done_receiving.set()
def test_client_cert_with_string_password(self) -> None:
self.run_client_cert_with_password_test("letmein")
def test_client_cert_with_bytes_password(self) -> None:
self.run_client_cert_with_password_test(b"letmein")
def run_client_cert_with_password_test(self, password: bytes | str) -> None:
"""
Tests client certificate password functionality
"""
done_receiving = Event()
client_certs = []
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
sock = self._wrap_in_ssl(sock)
client_certs.append(sock.getpeercert())
data = b""
while not data.endswith(b"\r\n\r\n"):
data += sock.recv(8192)
sock.sendall(
b"HTTP/1.1 200 OK\r\n"
b"Server: testsocket\r\n"
b"Connection: close\r\n"
b"Content-Length: 6\r\n"
b"\r\n"
b"Valid!"
)
done_receiving.wait(5)
sock.close()
self._start_server(socket_handler)
assert ssl_.SSLContext is not None
ssl_context = ssl_.SSLContext(ssl_.PROTOCOL_SSLv23)
ssl_context.load_cert_chain(
certfile=self.cert_path, keyfile=self.password_key_path, password=password
)
with HTTPSConnectionPool(
self.host,
self.port,
ssl_context=ssl_context,
cert_reqs="REQUIRED",
ca_certs=self.ca_path,
) as pool:
pool.request("GET", "/", retries=0)
done_receiving.set()
assert len(client_certs) == 1
def test_load_keyfile_with_invalid_password(self) -> None:
assert ssl_.SSLContext is not None
context = ssl_.SSLContext(ssl_.PROTOCOL_SSLv23)
with pytest.raises(ssl.SSLError):
context.load_cert_chain(
certfile=self.cert_path,
keyfile=self.password_key_path,
password=b"letmei",
)
def test_load_invalid_cert_file(self) -> None:
assert ssl_.SSLContext is not None
context = ssl_.SSLContext(ssl_.PROTOCOL_SSLv23)
with pytest.raises(ssl.SSLError):
context.load_cert_chain(certfile=self.password_key_path)
|
TestClientCerts
|
python
|
Netflix__metaflow
|
metaflow/cmd/develop/__init__.py
|
{
"start": 113,
"end": 689
}
|
class ____:
def __init__(self):
pass
@click.group()
@click.pass_context
def cli(ctx):
pass
@cli.group(help="Metaflow develop commands")
@click.option(
"--quiet/--no-quiet",
show_default=True,
default=False,
help="Suppress unnecessary messages",
)
@click.pass_context
def develop(
ctx: Any,
quiet: bool,
):
if quiet:
echo = echo_dev_null
else:
echo = echo_always
obj = CommandObj()
obj.quiet = quiet
obj.echo = echo
obj.echo_always = echo_always
ctx.obj = obj
from . import stubs
|
CommandObj
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/type_spec.py
|
{
"start": 30376,
"end": 40985
}
|
class ____(TypeSpec, metaclass=abc.ABCMeta):
"""TypeSpec with a batchable tensor encoding.
The batchable tensor encoding is a list of `tf.Tensor`s that supports
batching and unbatching. In particular, stacking (or unstacking)
values with the same `TypeSpec` must be equivalent to stacking (or
unstacking) each of their tensor lists. Unlike the component encoding
(returned by `self._to_components)`, the batchable tensor encoding
may require using encoding/decoding ops.
If a subclass's batchable tensor encoding is not simply a flattened version
of the component encoding, then the subclass must override `_to_tensor_list`,
`_from_tensor_list`, and _flat_tensor_specs`.
"""
__slots__ = []
__batch_encoder__ = LegacyTypeSpecBatchEncoder()
@abc.abstractmethod
def _batch(self, batch_size) -> TypeSpec:
"""Returns a TypeSpec representing a batch of objects with this TypeSpec.
Args:
batch_size: An `int` representing the number of elements in a batch, or
`None` if the batch size may vary.
Returns:
A `TypeSpec` representing a batch of objects with this TypeSpec.
"""
raise NotImplementedError(f"{type(self).__name__}._batch")
@abc.abstractmethod
def _unbatch(self) -> TypeSpec:
"""Returns a TypeSpec representing a single element this TypeSpec.
Returns:
A `TypeSpec` representing a single element of objects with this TypeSpec.
"""
raise NotImplementedError(f"{type(self).__name__}._unbatch")
# LINT.IfChange
@property
def _flat_tensor_specs(self) -> List[TypeSpec]:
"""A list of TensorSpecs compatible with self._to_tensor_list(v)."""
component_flat_tensor_specs = nest.map_structure(
functools.partial(get_batchable_flat_tensor_specs, context_spec=self),
self._component_specs)
return nest.flatten(component_flat_tensor_specs)
# LINT.ThenChange(//tensorflow/python/framework/type_utils.py:_specs_for_flat_tensors)
# Note that _specs_for_flat_tensors in type_utils.py must correspond
# _flat_tensor_specs in this class and any derived classes.
def _to_tensor_list(
self, value: composite_tensor.CompositeTensor
) -> List["core_types.Symbol"]:
"""Encodes `value` as a flat list of `core.Symbol`."""
component_tensor_lists = nest.map_structure(batchable_to_tensor_list,
self._component_specs,
self._to_components(value))
return nest.flatten(component_tensor_lists)
def _to_batched_tensor_list(
self, value: composite_tensor.CompositeTensor
) -> List["core_types.Symbol"]:
"""Encodes `value` as a flat list of `core.Symbol` each with rank>0."""
get_spec_tensor_list = lambda spec, v: ( # pylint: disable=g-long-lambda
batchable_to_tensor_list(spec, v, minimum_rank=1)
if isinstance(spec, BatchableTypeSpec) else spec._to_tensor_list(v)) # pylint: disable=protected-access
component_batched_tensor_lists = nest.map_structure(
get_spec_tensor_list, self._component_specs, self._to_components(value))
tensor_list = nest.flatten(component_batched_tensor_lists)
if any(t.shape.ndims == 0 for t in tensor_list):
raise ValueError(
f"While converting {value} to a list of tensors for batching, "
f"found a scalar item which cannot be batched.")
return tensor_list
def _from_compatible_tensor_list(
self,
tensor_list: List["core_types.Symbol"]
) -> composite_tensor.CompositeTensor:
"""Reconstructs a value from a compatible flat list of `core.Symbol`."""
flat_specs = nest.map_structure(
functools.partial(get_batchable_flat_tensor_specs, context_spec=self),
self._component_specs)
nested_tensor_list = nest.pack_sequence_as(flat_specs, tensor_list)
components = nest.map_structure_up_to(self._component_specs,
batchable_from_tensor_list,
self._component_specs,
nested_tensor_list)
return self._from_components(components)
def get_batchable_flat_tensor_specs(spec, context_spec=None):
"""Returns the flat tensor specs for `spec`."""
if isinstance(spec, internal.TensorSpec):
return [spec]
elif hasattr(spec, "__batch_encoder__"):
encoding_specs = nest.map_structure(
functools.partial(
get_batchable_flat_tensor_specs, context_spec=context_spec),
spec.__batch_encoder__.encoding_specs(spec))
return nest.flatten(encoding_specs)
else:
# TODO(edloper) Fix existing CompositeTensors that permit this, and
# then turn this warning into an error.
warnings.warn(f"Batchable type {context_spec} contains non-batchable "
f"field or component with type {spec}.")
return spec._flat_tensor_specs # pylint: disable=protected-access
def batchable_to_tensor_list(spec, value, minimum_rank=0):
"""Returns a list of tensors encoding `value`, whose type is `spec`."""
if isinstance(spec, internal.TensorSpec):
return [value]
elif hasattr(spec, "__batch_encoder__"):
encoded_value = spec.__batch_encoder__.encode(spec, value, minimum_rank)
encoded_specs = spec.__batch_encoder__.encoding_specs(spec)
encoded_flats = nest.map_structure(
functools.partial(batchable_to_tensor_list, minimum_rank=minimum_rank),
encoded_specs, encoded_value)
return nest.flatten(encoded_flats)
else:
return spec._to_tensor_list(value) # pylint: disable=protected-access
def batchable_from_tensor_list(spec, tensor_list):
"""Returns a value with type `spec` decoded from `tensor_list`."""
if isinstance(spec, internal.TensorSpec):
assert len(tensor_list) == 1
return tensor_list[0]
elif hasattr(spec, "__batch_encoder__"):
encoded_specs = spec.__batch_encoder__.encoding_specs(spec)
flat_specs = nest.map_structure(get_batchable_flat_tensor_specs,
encoded_specs)
encoded_flats = nest.pack_sequence_as(flat_specs, tensor_list)
encoded_value = nest.map_structure_up_to(encoded_specs,
batchable_from_tensor_list,
encoded_specs, encoded_flats)
return spec.__batch_encoder__.decode(spec, encoded_value)
else:
return spec._from_compatible_tensor_list(tensor_list) # pylint: disable=protected-access
@tf_export("type_spec_from_value")
def type_spec_from_value(value) -> TypeSpec:
"""Returns a `tf.TypeSpec` that represents the given `value`.
Examples:
>>> tf.type_spec_from_value(tf.constant([1, 2, 3]))
TensorSpec(shape=(3,), dtype=tf.int32, name=None)
>>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))
TensorSpec(shape=(2,), dtype=tf.float64, name=None)
>>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))
RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)
>>> example_input = tf.ragged.constant([[1, 2], [3]])
>>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])
... def f(x):
... return tf.reduce_sum(x, axis=1)
Args:
value: A value that can be accepted or returned by TensorFlow APIs. Accepted
types for `value` include `tf.Tensor`, any value that can be converted to
`tf.Tensor` using `tf.convert_to_tensor`, and any subclass of
`CompositeTensor` (such as `tf.RaggedTensor`).
Returns:
A `TypeSpec` that is compatible with `value`.
Raises:
TypeError: If a TypeSpec cannot be built for `value`, because its type
is not supported.
"""
spec = _type_spec_from_value(value)
if spec is not None:
return spec
# Fallback: try converting value to a tensor.
try:
tensor = tensor_conversion_registry.convert(value)
spec = _type_spec_from_value(tensor)
if spec is not None:
return spec
except (ValueError, TypeError) as e:
logging.vlog(
3, "Failed to convert %r to tensor: %s" % (type(value).__name__, e))
raise TypeError(f"Could not build a TypeSpec for {value} of "
f"unsupported type {type(value)}.")
def _type_spec_from_value(value) -> TypeSpec:
"""Returns a `TypeSpec` that represents the given `value`."""
if isinstance(value, core_types.Symbol):
# Note: we do not include Tensor names when constructing TypeSpecs.
return trace_type.from_value(value)
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
# If `value` is a list and all of its elements can be represented by the same
# batchable type spec, then we can represent the entire list using a single
# type spec that captures the type accurately (unlike the `convert_to_tensor`
# fallback).
if isinstance(value, list) and value:
subspecs = [_type_spec_from_value(v) for v in value]
if isinstance(subspecs[0], BatchableTypeSpec):
merged_subspec = subspecs[0].most_specific_common_supertype(subspecs[1:])
if merged_subspec is not None:
return merged_subspec._batch(len(subspecs)) # pylint: disable=protected-access
for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):
type_object, converter_fn, allow_subclass = entry
if ((type(value) is type_object) or # pylint: disable=unidiomatic-typecheck
(allow_subclass and isinstance(value, type_object))):
return converter_fn(value)
return None
_TYPE_CONVERSION_FUNCTION_REGISTRY = []
def register_type_spec_from_value_converter(type_object,
converter_fn,
allow_subclass=False):
"""Registers a function for converting values with a given type to TypeSpecs.
If multiple registered `type_object`s match a value, then the most recent
registration takes precedence. Custom converters should not be defined for
`CompositeTensor`s; use `CompositeTensor._type_spec` instead.
Args:
type_object: A Python `type` object representing the type of values accepted
by `converter_fn`.
converter_fn: A function that takes one argument (an instance of the type
represented by `type_object`) and returns a `TypeSpec`.
allow_subclass: If true, then use `isinstance(value, type_object)` to check
for matches. If false, then use `type(value) is type_object`.
"""
_, type_object = tf_decorator.unwrap(type_object)
_TYPE_CONVERSION_FUNCTION_REGISTRY.append(
(type_object, converter_fn, allow_subclass))
|
BatchableTypeSpec
|
python
|
django__django
|
tests/queries/tests.py
|
{
"start": 162789,
"end": 163432
}
|
class ____(TestCase):
def test_ticket_20101(self):
"""
Tests QuerySet ORed combining in exclude subquery case.
"""
t = Tag.objects.create(name="foo")
a1 = Annotation.objects.create(tag=t, name="a1")
a2 = Annotation.objects.create(tag=t, name="a2")
a3 = Annotation.objects.create(tag=t, name="a3")
n = Note.objects.create(note="foo", misc="bar")
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
|
Ticket20101Tests
|
python
|
openai__openai-python
|
src/openai/resources/files.py
|
{
"start": 1171,
"end": 14032
}
|
class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return FilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> FilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return FilesWithStreamingResponse(self)
def create(
self,
*,
file: FileTypes,
purpose: FilePurpose,
expires_after: file_create_params.ExpiresAfter | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileObject:
"""Upload a file that can be used across various endpoints.
Individual files can be
up to 512 MB, and the size of all files uploaded by one organization can be up
to 1 TB.
- The Assistants API supports files up to 2 million tokens and of specific file
types. See the
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools)
for details.
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input)
or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input
also has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Args:
file: The File object (not file name) to be uploaded.
purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
Flexible file type for any purpose - `evals`: Used for eval data sets
expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
after 30 days and all other files are persisted until they are manually deleted.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/files",
body=maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def retrieve(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileObject:
"""
Returns information about a specific file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
purpose: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[FileObject]:
"""Returns a list of files.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
10,000, and the default is 10,000.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
purpose: Only return files with the given purpose.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/files",
page=SyncCursorPage[FileObject],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"purpose": purpose,
},
file_list_params.FileListParams,
),
),
model=FileObject,
)
def delete(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileDeleted:
"""
Delete a file and remove it from all vector stores.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileDeleted,
)
def content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@typing_extensions.deprecated("The `.content()` method should be used instead")
def retrieve_content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=str,
)
def wait_for_processing(
self,
id: str,
*,
poll_interval: float = 5.0,
max_wait_seconds: float = 30 * 60,
) -> FileObject:
"""Waits for the given file to be processed, default timeout is 30 mins."""
TERMINAL_STATES = {"processed", "error", "deleted"}
start = time.time()
file = self.retrieve(id)
while file.status not in TERMINAL_STATES:
self._sleep(poll_interval)
file = self.retrieve(id)
if time.time() - start > max_wait_seconds:
raise RuntimeError(
f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
)
return file
|
Files
|
python
|
doocs__leetcode
|
lcci/01.07.Rotate Matrix/Solution.py
|
{
"start": 0,
"end": 376
}
|
class ____:
def rotate(self, matrix: List[List[int]]) -> None:
n = len(matrix)
for i in range(n >> 1):
for j in range(n):
matrix[i][j], matrix[n - i - 1][j] = matrix[n - i - 1][j], matrix[i][j]
for i in range(n):
for j in range(i):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-snowflake-cortex/destination_snowflake_cortex/cortex_processor.py
|
{
"start": 3877,
"end": 15716
}
|
class ____(SqlProcessorBase):
"""A Snowflake implementation for use with Cortex functions."""
supports_merge_insert = False
"""We use the emulated merge code path because each primary key has multiple rows (chunks)."""
sql_config: SnowflakeCortexConfig
"""The configuration for the Snowflake processor, including the vector length."""
splitter_config: DocumentSplitterConfig
"""The configuration for the document splitter."""
file_writer_class = JsonlWriter
type_converter_class: type[SnowflakeTypeConverter] = SnowflakeTypeConverter
def __init__(
self,
sql_config: SnowflakeCortexConfig,
splitter_config: DocumentSplitterConfig,
embedder_config: EmbeddingConfig,
catalog_provider: CatalogProvider,
temp_dir: Path,
temp_file_cleanup: bool = True,
) -> None:
"""Initialize the Snowflake processor."""
self.splitter_config = splitter_config
self.embedder_config = embedder_config
super().__init__(
sql_config=sql_config,
catalog_provider=catalog_provider,
temp_dir=temp_dir,
temp_file_cleanup=temp_file_cleanup,
)
def _get_sql_column_definitions(
self,
stream_name: str,
) -> dict[str, sqlalchemy.types.TypeEngine]:
"""Return the column definitions for the given stream.
Return the static static column definitions for cortex streams.
"""
_ = stream_name # unused
return {
DOCUMENT_ID_COLUMN: self.type_converter_class.get_string_type(),
CHUNK_ID_COLUMN: self.type_converter_class.get_string_type(),
METADATA_COLUMN: self.type_converter_class.get_json_type(),
DOCUMENT_CONTENT_COLUMN: self.type_converter_class.get_json_type(),
EMBEDDING_COLUMN: f"VECTOR(FLOAT, {self.embedding_dimensions})",
}
@overrides
def _write_files_to_new_table(
self,
files: list[Path],
stream_name: str,
batch_id: str,
) -> str:
"""Write files to a new table.
This is the same as PyAirbyte's SnowflakeSqlProcessor implementation, migrated here for
stability. The main differences lie within `_get_sql_column_definitions()`, whose logic is
abstracted out of this method.
"""
temp_table_name = self._create_table_for_loading(
stream_name=stream_name,
batch_id=batch_id,
)
internal_sf_stage_name = f"@%{temp_table_name}"
def path_str(path: Path) -> str:
return str(path.absolute()).replace("\\", "\\\\")
for file_path in files:
query = f"PUT 'file://{path_str(file_path)}' {internal_sf_stage_name};"
self._execute_sql(query)
columns_list = [
self._quote_identifier(c)
for c in list(self._get_sql_column_definitions(stream_name).keys())
]
files_list = ", ".join([f"'{f.name}'" for f in files])
columns_list_str: str = indent("\n, ".join(columns_list), " " * 12)
# following block is different from SnowflakeSqlProcessor
vector_suffix = f"::Vector(Float, {self.embedding_dimensions})"
variant_cols_str: str = ("\n" + " " * 21 + ", ").join([
f"$1:{col}{vector_suffix if 'embedding' in col else ''}" for col in columns_list
])
if self.sql_config.cortex_embedding_model: # Currently always false
# WARNING: This is untested and may not work as expected.
variant_cols_str += f"snowflake.cortex.embed('{self.sql_config.cortex_embedding_model}', $1:{DOCUMENT_CONTENT_COLUMN})"
copy_statement = dedent(
f"""
COPY INTO {temp_table_name}
(
{columns_list_str}
)
FROM (
SELECT {variant_cols_str}
FROM {internal_sf_stage_name}
)
FILES = ( {files_list} )
FILE_FORMAT = ( TYPE = JSON, COMPRESSION = GZIP )
;
"""
)
self._execute_sql(copy_statement)
return temp_table_name
    @overrides
    def _init_connection_settings(self, connection: Connection) -> None:
        """We set Snowflake-specific settings for the session.

        This sets QUOTED_IDENTIFIERS_IGNORE_CASE setting to True, which is necessary because
        Snowflake otherwise will treat quoted table and column references as case-sensitive.
        More info: https://docs.snowflake.com/en/sql-reference/identifiers-syntax

        This also sets MULTI_STATEMENT_COUNT to 0, which allows multi-statement commands.
        """
        # Both parameters are set in one ALTER SESSION statement; Snowflake
        # accepts multiple space-separated name/value pairs here.
        connection.execute(
            """
            ALTER SESSION SET
            QUOTED_IDENTIFIERS_IGNORE_CASE = TRUE
            MULTI_STATEMENT_COUNT = 0
            """
        )
    def _emulated_merge_temp_table_to_final_table(
        self,
        stream_name: str,
        temp_table_name: str,
        final_table_name: str,
    ) -> None:
        """Emulate the merge operation using a series of SQL commands.

        This method varies from the SnowflakeSqlProcessor implementation in that multiple rows will exist for each
        primary key. And we need to remove all rows (chunks) for a given primary key before inserting new ones.

        So instead of using UPDATE and then INSERT, we will DELETE all rows for included primary keys and then call
        the append implementation to insert new rows.
        """
        columns_list: list[str] = list(
            self._get_sql_column_definitions(stream_name=stream_name).keys()
        )
        # Remove every existing chunk row for each document present in the temp table.
        delete_statement = dedent(
            f"""
            DELETE FROM {final_table_name}
            WHERE {DOCUMENT_ID_COLUMN} IN (
                SELECT {DOCUMENT_ID_COLUMN}
                FROM {temp_table_name}
            );
            """
        )
        # Then append all freshly-loaded chunk rows.
        append_statement = dedent(
            f"""
            INSERT INTO {final_table_name}
            ({", ".join(columns_list)})
            SELECT {", ".join(columns_list)}
            FROM {temp_table_name};
            """
        )
        with self.get_sql_connection() as conn:
            # This is a transactional operation to avoid outages, in case
            # a user queries the data during the operation.
            conn.execute(delete_statement)
            conn.execute(append_statement)
    def process_record_message(
        self,
        record_msg: AirbyteRecordMessage,
        stream_schema: dict,
    ) -> None:
        """Write a record to the cache.

        We override the SQLProcessor implementation in order to handle chunking, embedding, etc.

        This method is called for each record message, before the record is written to local file.

        Each source record is split into chunks; one row (with its own chunk id
        and optional embedding vector) is emitted per chunk. NOTE(review): the
        incoming ``stream_schema`` is ignored — a fixed chunk schema is emitted
        instead.
        """
        document_chunks, id_to_delete = self.splitter.process(record_msg)

        # TODO: Decide if we need to incorporate this into the final implementation:
        _ = id_to_delete

        if not self.sql_config.cortex_embedding_model:
            embeddings = self.embedder.embed_documents(
                # TODO: Check this: Expects a list of documents, not chunks (docs are inconsistent)
                documents=document_chunks,
            )
        for i, chunk in enumerate(document_chunks, start=0):
            new_data: dict[str, Any] = {
                DOCUMENT_ID_COLUMN: self._create_document_id(record_msg),
                CHUNK_ID_COLUMN: str(uuid.uuid4().int),
                METADATA_COLUMN: chunk.metadata,
                DOCUMENT_CONTENT_COLUMN: chunk.page_content,
                EMBEDDING_COLUMN: None,
            }
            if not self.sql_config.cortex_embedding_model:
                # embeddings[i] lines up with document_chunks[i] computed above.
                new_data[EMBEDDING_COLUMN] = embeddings[i]

            self.file_writer.process_record_message(
                record_msg=AirbyteRecordMessage(
                    namespace=record_msg.namespace,
                    stream=record_msg.stream,
                    data=new_data,
                    emitted_at=record_msg.emitted_at,
                ),
                stream_schema={
                    "type": "object",
                    "properties": {
                        DOCUMENT_ID_COLUMN: {"type": "string"},
                        CHUNK_ID_COLUMN: {"type": "string"},
                        METADATA_COLUMN: {"type": "object"},
                        DOCUMENT_CONTENT_COLUMN: {"type": "string"},
                        EMBEDDING_COLUMN: {
                            "type": "array",
                            "items": {"type": "float"},
                        },
                    },
                },
            )
def _get_table_by_name(
self,
table_name: str,
*,
force_refresh: bool = False,
shallow_okay: bool = False,
) -> sqlalchemy.Table:
"""Return a table object from a table name.
Workaround: Until `VECTOR` type is supported by the Snowflake SQLAlchemy dialect, we will
return a table with fixed columns. This is a temporary solution until the dialect is updated.
Tracking here: https://github.com/snowflakedb/snowflake-sqlalchemy/issues/499
"""
_ = force_refresh, shallow_okay # unused
table = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
)
for column_name, column_type in self._get_sql_column_definitions(table_name).items():
table.append_column(
sqlalchemy.Column(
column_name,
column_type,
primary_key=column_name in [DOCUMENT_ID_COLUMN, CHUNK_ID_COLUMN],
)
)
return table
def _add_missing_columns_to_table(
self,
stream_name: str,
table_name: str,
) -> None:
"""Add missing columns to the table.
This is a no-op because metadata scans do not work with the `VECTOR` data type.
"""
pass
    @property
    def embedder(self) -> embedder.Embedder:
        """Embedder built from the configured embedding/processing settings.

        NOTE(review): constructed anew on every access (not cached).
        """
        return embedder.create_from_config(
            embedding_config=self.embedder_config,  # type: ignore [arg-type] # No common base class
            processing_config=self.splitter_config,
        )
    @property
    def embedding_dimensions(self) -> int:
        """Return the number of dimensions for the embeddings.

        Delegates to the embedder, so the value tracks the configured model.
        """
        return self.embedder.embedding_dimensions
    @property
    def splitter(self) -> DocumentSplitter:
        """Document splitter configured for the streams in the catalog.

        NOTE(review): constructed anew on every access (not cached).
        """
        return DocumentSplitter(
            config=self.splitter_config,
            catalog=self.catalog_provider.configured_catalog,
        )
def _create_document_id(self, record_msg: AirbyteRecordMessage) -> str:
"""Create document id based on the primary key values. Returns a random uuid if no primary key is found"""
stream_name = record_msg.stream
primary_key = self._get_record_primary_key(record_msg=record_msg)
if primary_key is not None:
return f"Stream_{stream_name}_Key_{primary_key}"
return str(uuid.uuid4().int)
def _get_record_primary_key(self, record_msg: AirbyteRecordMessage) -> str | None:
"""Create primary key for the record by appending the primary keys."""
stream_name = record_msg.stream
primary_keys = self._get_primary_keys(stream_name)
if not primary_keys:
return None
primary_key = []
for key in primary_keys:
try:
primary_key.append(str(dpath.get(record_msg.data, key)))
except KeyError:
primary_key.append("__not_found__")
# return a stringified version of all primary keys
stringified_primary_key = "_".join(primary_key)
return stringified_primary_key
|
SnowflakeCortexSqlProcessor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1409589,
"end": 1409970
}
|
class ____(sgqlc.types.Type, Node, AuditEntry, RepositoryAuditEntryData, OrganizationAuditEntryData):
    """Audit log entry for a repo.destroy event."""

    # NOTE: generated sgqlc schema binding -- do not edit by hand.
    __schema__ = github_schema
    # Only `visibility` is exposed as a queryable field on this type.
    __field_names__ = ("visibility",)
    visibility = sgqlc.types.Field(RepoDestroyAuditEntryVisibility, graphql_name="visibility")
    """The visibility of the repository"""
|
RepoDestroyAuditEntry
|
python
|
PyCQA__pylint
|
pylint/checkers/utils.py
|
{
"start": 15801,
"end": 80376
}
|
class ____(Exception):
    """Raised when a %-format string contains a conversion character that is
    not one of the supported format characters.
    """

    def __init__(self, index: int) -> None:
        # Remember where the offending character sits so callers can point
        # at it in their message.
        self.index = index
        super().__init__(index)
def parse_format_string(
    format_string: str,
) -> tuple[set[str], int, dict[str, str], list[str]]:
    """Parses a format string, returning a tuple (keys, num_args).

    Where 'keys' is the set of mapping keys in the format string, and 'num_args' is the number
    of arguments required by the format string. Raises IncompleteFormatString or
    UnsupportedFormatCharacter if a parse error occurs.

    Also returns 'key_types' (mapping key -> conversion char) and 'pos_types'
    (conversion chars of the positional specifiers, in order).
    """
    keys = set()
    key_types = {}
    pos_types = []
    num_args = 0

    def next_char(i: int) -> tuple[int, str]:
        # Advance one character; running off the end means the specifier
        # was truncated.
        i += 1
        if i == len(format_string):
            raise IncompleteFormatString
        return (i, format_string[i])

    i = 0
    while i < len(format_string):
        char = format_string[i]
        if char == "%":
            i, char = next_char(i)
            # Parse the mapping key (optional).
            key = None
            if char == "(":
                # Keys may contain balanced parentheses, so track nesting depth.
                depth = 1
                i, char = next_char(i)
                key_start = i
                while depth != 0:
                    if char == "(":
                        depth += 1
                    elif char == ")":
                        depth -= 1
                    i, char = next_char(i)
                key_end = i - 1
                key = format_string[key_start:key_end]
            # Parse the conversion flags (optional).
            while char in "#0- +":
                i, char = next_char(i)
            # Parse the minimum field width (optional).
            if char == "*":
                # '*' consumes one extra positional argument for the width.
                num_args += 1
                i, char = next_char(i)
            else:
                while char in string.digits:
                    i, char = next_char(i)
            # Parse the precision (optional).
            if char == ".":
                i, char = next_char(i)
                if char == "*":
                    # '*' precision also consumes an extra argument.
                    num_args += 1
                    i, char = next_char(i)
                else:
                    while char in string.digits:
                        i, char = next_char(i)
            # Parse the length modifier (optional).
            if char in "hlL":
                i, char = next_char(i)
            # Parse the conversion type (mandatory).
            flags = "diouxXeEfFgGcrs%a"
            if char not in flags:
                raise UnsupportedFormatCharacter(i)
            if key:
                keys.add(key)
                key_types[key] = char
            elif char != "%":
                # '%%' is a literal percent and consumes no argument.
                num_args += 1
                pos_types.append(char)
        i += 1
    return keys, num_args, key_types, pos_types
def split_format_field_names(
    format_string: str,
) -> tuple[str, Iterable[tuple[bool, str]]]:
    """Split a replacement-field name into its head and attribute/index path.

    Delegates to CPython's private ``_string`` helper; a malformed field name
    (signalled by ValueError) is reported as IncompleteFormatString.
    """
    try:
        split = _string.formatter_field_name_split(format_string)
    except ValueError as exc:
        raise IncompleteFormatString() from exc
    return split  # type: ignore[no-any-return]
def collect_string_fields(format_string: str) -> Iterable[str | None]:
    """Given a format string, return an iterator
    of all the valid format fields.

    It handles nested fields as well. Yields None for auto-numbered fields
    (``{}``); nested format specs are recursed into.
    """
    formatter = string.Formatter()
    # pylint: disable = too-many-try-statements
    try:
        parseiterator = formatter.parse(format_string)
        for result in parseiterator:
            if all(item is None for item in result[1:]):
                # not a replacement format
                continue
            name = result[1]
            nested = result[2]
            yield name
            if nested:
                # A format spec may itself contain replacement fields,
                # e.g. "{0:{width}}".
                yield from collect_string_fields(nested)
    except ValueError as exc:
        # Probably the format string is invalid.
        if exc.args[0].startswith("cannot switch from manual"):
            # On Jython, parsing a string with both manual
            # and automatic positions will fail with a ValueError,
            # while on CPython it will simply return the fields,
            # the validation being done in the interpreter (?).
            # We're just returning two mixed fields in order
            # to trigger the format-combined-specification check.
            yield ""
            yield "1"
            return
        raise IncompleteFormatString(format_string) from exc
def parse_format_method_string(
    format_string: str,
) -> tuple[list[tuple[str, list[tuple[bool, str]]]], int, int]:
    """Parses a PEP 3101 format string, returning a tuple of
    (keyword_arguments, implicit_pos_args_cnt, explicit_pos_args).

    keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
    is the number of arguments required by the format string and
    explicit_pos_args is the number of arguments passed with the position.
    """
    keyword_arguments: list[tuple[str, list[tuple[bool, str]]]] = []
    implicit_pos_args_cnt = 0
    explicit_pos_args: set[str] = set()
    for field_name in collect_string_fields(format_string):
        if not field_name:
            # Auto-numbered field such as "{}" (empty or None name).
            implicit_pos_args_cnt += 1
            continue
        if str(field_name).isdigit():
            # Explicitly numbered field such as "{0}".
            explicit_pos_args.add(str(field_name))
            continue
        keyname, fielditerator = split_format_field_names(field_name)
        if isinstance(keyname, numbers.Number):
            # e.g. "{0.attr}": the head is numeric, the rest is a path.
            explicit_pos_args.add(str(keyname))
        try:
            keyword_arguments.append((keyname, list(fielditerator)))
        except ValueError as exc:
            raise IncompleteFormatString() from exc
    return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
def is_attr_protected(attrname: str) -> bool:
    """Return True if attribute name is protected (start with _ and some other
    details), False otherwise.

    Protected means: a leading underscore, not the bare ``_`` placeholder,
    and not a dunder (``__name__``) identifier.
    """
    # str.startswith instead of attrname[0] so the empty string is handled
    # gracefully (indexing raised IndexError on "").
    return (
        attrname.startswith("_")
        and attrname != "_"
        and not (attrname.startswith("__") and attrname.endswith("__"))
    )
def node_frame_class(node: nodes.NodeNG) -> nodes.ClassDef | None:
    """Return the class that is wrapping the given node.

    The function returns a class for a method node (or a staticmethod or a
    classmethod), otherwise it returns `None`.
    """
    klass = node.frame()
    nodes_to_check = (
        nodes.NodeNG,
        astroid.UnboundMethod,
        astroid.BaseInstance,
    )
    # Walk outwards through enclosing frames until a ClassDef (or the module
    # root, whose parent is None) is reached.
    while (
        klass
        and isinstance(klass, nodes_to_check)
        and not isinstance(klass, nodes.ClassDef)
    ):
        if klass.parent is None:
            return None
        klass = klass.parent.frame()

    return klass
def get_outer_class(class_node: nodes.ClassDef) -> nodes.ClassDef | None:
    """Return the class that is the outer class of given (nested) class_node."""
    enclosing = class_node.parent.frame()
    if isinstance(enclosing, nodes.ClassDef):
        return enclosing
    return None
def is_attr_private(attrname: str) -> Match[str] | None:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore).
"""
regex = re.compile("^_{2,10}.*[^_]+_?$")
return regex.match(attrname)
def get_argument_from_call(
    call_node: nodes.Call, position: int | None = None, keyword: str | None = None
) -> nodes.Name:
    """Returns the specified argument from a function call.

    :param nodes.Call call_node: Node representing a function call to check.
    :param int position: position of the argument.
    :param str keyword: the keyword of the argument.

    :returns: The node representing the argument, None if the argument is not found.
    :rtype: nodes.Name
    :raises ValueError: if both position and keyword are None.
    :raises NoSuchArgumentError: if no argument at the provided position or with
    the provided keyword.
    """
    if position is None and keyword is None:
        raise ValueError("Must specify at least one of: position or keyword.")
    if position is not None:
        # Positional lookup is tried first; an out-of-range position falls
        # through to the keyword lookup below.
        try:
            return call_node.args[position]
        except IndexError:
            pass
    if keyword and call_node.keywords:
        for arg in call_node.keywords:
            if arg.arg == keyword:
                return arg.value

    raise NoSuchArgumentError
def infer_kwarg_from_call(call_node: nodes.Call, keyword: str) -> nodes.Name | None:
    """Returns the specified argument from a function's kwargs.

    :param nodes.Call call_node: Node representing a function call to check.
    :param str keyword: Name of the argument to be extracted.

    :returns: The node representing the argument, None if the argument is not found.
    :rtype: nodes.Name
    """
    # Look inside **kwargs expansions whose value infers to a dict literal
    # and return the value node stored under `keyword`.
    for arg in call_node.kwargs:
        inferred = safe_infer(arg.value)
        if isinstance(inferred, nodes.Dict):
            for item in inferred.items:
                if item[0].value == keyword:
                    return item[1]

    return None
def inherit_from_std_ex(node: nodes.NodeNG | astroid.Instance) -> bool:
    """Return whether the given class node is subclass of
    exceptions.Exception.

    The node itself is checked too, so passing Exception/BaseException
    directly also returns True.
    """
    ancestors = node.ancestors() if hasattr(node, "ancestors") else []
    return any(
        ancestor.name in {"Exception", "BaseException"}
        and ancestor.root().name == EXCEPTIONS_MODULE
        for ancestor in itertools.chain([node], ancestors)
    )
def error_of_type(
    handler: nodes.ExceptHandler,
    error_type: str | type[Exception] | tuple[str | type[Exception], ...],
) -> bool:
    """Check if the given exception handler catches
    the given error_type.

    The *handler* parameter is a node, representing an ExceptHandler node.
    The *error_type* can be an exception, such as AttributeError,
    the name of an exception, or it can be a tuple of errors.
    The function will return True if the handler catches any of the
    given errors.
    """

    def stringify_error(error: str | type[Exception]) -> str:
        # Normalize exception classes to their names so both forms compare.
        if not isinstance(error, str):
            return error.__name__
        return error

    if not isinstance(error_type, tuple):
        error_type = (error_type,)
    expected_errors = {stringify_error(error) for error in error_type}
    if not handler.type:
        # A bare `except:` has no type node and is treated as not matching.
        return False
    return handler.catch(expected_errors)  # type: ignore[no-any-return]
def decorated_with_property(node: nodes.FunctionDef) -> bool:
    """Detect if the given function node is decorated with a property."""
    decorators = node.decorators
    if not decorators:
        return False
    for candidate in decorators.nodes:
        # Inference may fail for exotic decorators; treat that as "not a property".
        try:
            is_prop = _is_property_decorator(candidate)
        except astroid.InferenceError:
            continue
        if is_prop:
            return True
    return False
def _is_property_kind(node: nodes.NodeNG, *kinds: str) -> bool:
    """Return True if *node* is a method decorated with ``@<prop>.<kind>``
    for any of the given kinds (e.g. "setter", "deleter").
    """
    if not isinstance(node, (astroid.UnboundMethod, nodes.FunctionDef)):
        return False
    decorators = node.decorators
    if not decorators:
        return False
    return any(
        isinstance(decorator, nodes.Attribute) and decorator.attrname in kinds
        for decorator in decorators.nodes
    )
def is_property_setter(node: nodes.NodeNG) -> bool:
    """Check if the given node is a property setter (``@prop.setter``)."""
    return _is_property_kind(node, "setter")


def is_property_deleter(node: nodes.NodeNG) -> bool:
    """Check if the given node is a property deleter (``@prop.deleter``)."""
    return _is_property_kind(node, "deleter")


def is_property_setter_or_deleter(node: nodes.NodeNG) -> bool:
    """Check if the given node is either a property setter or a deleter."""
    return _is_property_kind(node, "setter", "deleter")
def _is_property_decorator(decorator: nodes.Name) -> bool:
    """Return True if *decorator* infers to `property` (or a subclass /
    `functools.cached_property`), or to a factory function that returns one.
    """
    for inferred in decorator.infer():
        if isinstance(inferred, nodes.ClassDef):
            if inferred.qname() in {"builtins.property", "functools.cached_property"}:
                return True
            for ancestor in inferred.ancestors():
                if ancestor.name == "property" and ancestor.root().name == "builtins":
                    return True
        elif isinstance(inferred, nodes.FunctionDef):
            # If decorator is function, check if it has exactly one return
            # and the return is itself a function decorated with property
            returns: list[nodes.Return] = list(
                inferred._get_return_nodes_skip_functions()
            )
            if len(returns) == 1 and isinstance(
                returns[0].value, (nodes.Name, nodes.Attribute)
            ):
                inferred = safe_infer(returns[0].value)
                if (
                    inferred
                    and isinstance(inferred, objects.Property)
                    and isinstance(inferred.function, nodes.FunctionDef)
                ):
                    return decorated_with_property(inferred.function)
    return False
def decorated_with(
    func: (
        nodes.ClassDef | nodes.FunctionDef | astroid.BoundMethod | astroid.UnboundMethod
    ),
    qnames: Iterable[str],
) -> bool:
    """Determine if the `func` node has a decorator with the qualified name `qname`.

    Both short names and fully qualified names in *qnames* match.
    """
    decorators = func.decorators.nodes if func.decorators else []
    for decorator_node in decorators:
        if isinstance(decorator_node, nodes.Call):
            # We only want to infer the function name
            decorator_node = decorator_node.func
        try:
            if any(
                i.name in qnames or i.qname() in qnames
                for i in decorator_node.infer()
                if isinstance(i, (nodes.ClassDef, nodes.FunctionDef))
            ):
                return True
        except astroid.InferenceError:
            # Uninferable decorator: skip it rather than guessing.
            continue
    return False
def uninferable_final_decorators(
    node: nodes.Decorators,
) -> list[nodes.Attribute | nodes.Name | None]:
    """Return a list of uninferable `typing.final` decorators in `node`.

    This function is used to determine if the `typing.final` decorator is used
    with an unsupported Python version; the decorator cannot be inferred when
    using a Python version lower than 3.8.
    """
    decorators = []
    for decorator in getattr(node, "nodes", []):
        import_nodes: tuple[nodes.Import | nodes.ImportFrom] | None = None

        # Get the `Import` node. The decorator is of the form: @module.name
        if isinstance(decorator, nodes.Attribute):
            inferred = safe_infer(decorator.expr)
            if isinstance(inferred, nodes.Module) and inferred.qname() == "typing":
                _, import_nodes = decorator.expr.lookup(decorator.expr.name)

        # Get the `ImportFrom` node. The decorator is of the form: @name
        elif isinstance(decorator, nodes.Name):
            _, import_nodes = decorator.lookup(decorator.name)

        # The `final` decorator is expected to be found in the
        # import_nodes. Continue if we don't find any `Import` or `ImportFrom`
        # nodes for this decorator.
        if not import_nodes:
            continue
        import_node = import_nodes[0]

        if not isinstance(import_node, (nodes.Import, nodes.ImportFrom)):
            continue

        import_names = dict(import_node.names)

        # Check if the import is of the form: `from typing import final`
        is_from_import = ("final" in import_names) and import_node.modname == "typing"
        # Check if the import is of the form: `import typing`
        is_import = ("typing" in import_names) and getattr(
            decorator, "attrname", None
        ) == "final"

        if is_from_import or is_import:
            inferred = safe_infer(decorator)
            if inferred is None or isinstance(inferred, util.UninferableBase):
                # The decorator is `typing.final` but could not be inferred.
                decorators.append(decorator)
    return decorators
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
    node: nodes.ClassDef,
    is_abstract_cb: Callable[[nodes.FunctionDef], bool] | None = None,
) -> dict[str, nodes.FunctionDef]:
    """Get the unimplemented abstract methods for the given *node*.

    A method can be considered abstract if the callback *is_abstract_cb*
    returns a ``True`` value. The check defaults to verifying that
    a method is decorated with abstract methods.
    It will return a dictionary of abstract method
    names and their inferred objects.
    """
    if is_abstract_cb is None:
        is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
    visited: dict[str, nodes.FunctionDef] = {}
    try:
        mro = reversed(node.mro())
    except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try to figure this out here.
        return {}
    # Walk the MRO from the most-basic class down so that overriding
    # implementations remove their ancestors' abstract entries.
    for ancestor in mro:
        for obj in ancestor.values():
            inferred = obj
            if isinstance(obj, nodes.AssignName):
                inferred = safe_infer(obj)
                if not inferred:
                    # Might be an abstract function,
                    # but since we don't have enough information
                    # in order to take this decision, we're taking
                    # the *safe* decision instead.
                    if obj.name in visited:
                        del visited[obj.name]
                    continue
                if not isinstance(inferred, nodes.FunctionDef):
                    # A non-function reassignment counts as an implementation.
                    if obj.name in visited:
                        del visited[obj.name]
            if isinstance(inferred, nodes.FunctionDef):
                # It's critical to use the original name,
                # since after inferring, an object can be something
                # else than expected, as in the case of the
                # following assignment.
                #
                # class A:
                #     def keys(self): pass
                #     __iter__ = keys
                abstract = is_abstract_cb(inferred)
                if abstract:
                    visited[obj.name] = inferred
                elif not abstract and obj.name in visited:
                    del visited[obj.name]
    return visited
def find_try_except_wrapper_node(
    node: nodes.NodeNG,
) -> nodes.ExceptHandler | nodes.Try | None:
    """Return the ExceptHandler or the Try node in which the node is.

    Walks up the ancestor chain and returns the nearest enclosing Try or
    ExceptHandler, or None when the node is not wrapped by either.
    """
    current = node
    ignores = (nodes.ExceptHandler, nodes.Try)
    while current and not isinstance(current.parent, ignores):
        current = current.parent

    if current and isinstance(current.parent, ignores):
        return current.parent
    return None
def find_except_wrapper_node_in_scope(
    node: nodes.NodeNG,
) -> nodes.ExceptHandler | None:
    """Return the ExceptHandler in which the node is, without going out of scope."""
    for current in node.node_ancestors():
        match current:
            case nodes.LocalsDictNodeNG():
                # If we're inside a function/class definition, we don't want to keep checking
                # higher ancestors for `except` clauses, because if these exist, it means our
                # function/class was defined in an `except` clause, rather than the current code
                # actually running in an `except` clause.
                return None
            case nodes.ExceptHandler():
                return current
    return None
def is_from_fallback_block(node: nodes.NodeNG) -> bool:
    """Check if the given node is from a fallback import block.

    A fallback block is a try/except where either side performs imports and
    the handlers swallow ImportError/ModuleNotFoundError.
    """
    context = find_try_except_wrapper_node(node)
    if not context:
        return False

    if isinstance(context, nodes.ExceptHandler):
        # Node lives in a handler: the "other" imports are in the try body.
        other_body = context.parent.body
        handlers = context.parent.handlers
    else:
        # Node lives in the try body: the "other" imports are in the handlers.
        other_body = itertools.chain.from_iterable(
            handler.body for handler in context.handlers
        )
        handlers = context.handlers

    has_fallback_imports = any(
        isinstance(import_node, (nodes.ImportFrom, nodes.Import))
        for import_node in other_body
    )
    ignores_import_error = _except_handlers_ignores_exceptions(
        handlers, (ImportError, ModuleNotFoundError)
    )
    return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exceptions(
    handlers: list[nodes.ExceptHandler],
    exceptions: tuple[type[ImportError], type[ModuleNotFoundError]],
) -> bool:
    """Return True if any handler in *handlers* catches one of *exceptions*."""
    func = partial(error_of_type, error_type=exceptions)
    return any(func(handler) for handler in handlers)
def get_exception_handlers(
    node: nodes.NodeNG, exception: type[Exception] | str = Exception
) -> list[nodes.ExceptHandler]:
    """Return the collections of handlers handling the exception in arguments.

    Args:
        node (nodes.NodeNG): A node that is potentially wrapped in a try except.
        exception (builtin.Exception or str): exception or name of the exception.

    Returns:
        list: the collection of handlers that are handling the exception
        (an empty list when the node is not wrapped in a Try).
    """
    context = find_try_except_wrapper_node(node)
    if isinstance(context, nodes.Try):
        return [
            handler for handler in context.handlers if error_of_type(handler, exception)
        ]
    return []
def get_contextlib_with_statements(node: nodes.NodeNG) -> Iterator[nodes.With]:
    """Get all contextlib.with statements in the ancestors of the given node."""
    yield from (
        ancestor
        for ancestor in node.node_ancestors()
        if isinstance(ancestor, nodes.With)
    )
def _suppresses_exception(
    call: nodes.Call, exception: type[Exception] | str = Exception
) -> bool:
    """Check if the given node suppresses the given exception.

    *call* is expected to be a `contextlib.suppress(...)` call; its arguments
    (single exception classes or tuples of them) are matched by name.
    """
    if not isinstance(exception, str):
        exception = exception.__name__
    for arg in call.args:
        match inferred := safe_infer(arg):
            case nodes.ClassDef():
                if inferred.name == exception:
                    return True
            case nodes.Tuple():
                # suppress((A, B)) — inspect each element of the tuple.
                for elt in inferred.elts:
                    inferred_elt = safe_infer(elt)
                    if (
                        isinstance(inferred_elt, nodes.ClassDef)
                        and inferred_elt.name == exception
                    ):
                        return True
    return False
def get_contextlib_suppressors(
    node: nodes.NodeNG, exception: type[Exception] | str = Exception
) -> Iterator[nodes.With]:
    """Return the contextlib suppressors handling the exception.

    Args:
        node (nodes.NodeNG): A node that is potentially wrapped in a contextlib.suppress.
        exception (builtin.Exception): exception or name of the exception.

    Yields:
        nodes.With: A with node that is suppressing the exception.
    """
    for with_node in get_contextlib_with_statements(node):
        for item, _ in with_node.items:
            if isinstance(item, nodes.Call):
                inferred = safe_infer(item.func)
                # Only `contextlib.suppress(...)` context managers qualify.
                if (
                    isinstance(inferred, nodes.ClassDef)
                    and inferred.qname() == "contextlib.suppress"
                ):
                    if _suppresses_exception(item, exception):
                        yield with_node
def is_node_inside_try_except(node: nodes.Raise) -> bool:
    """Check if the node is directly under a Try/Except statement
    (but not under an ExceptHandler!).

    Args:
        node (nodes.Raise): the node raising the exception.

    Returns:
        bool: True if the node is inside a try/except statement, False otherwise.
    """
    return isinstance(find_try_except_wrapper_node(node), nodes.Try)
def node_ignores_exception(
    node: nodes.NodeNG, exception: type[Exception] | str = Exception
) -> bool:
    """Check if the node is in a Try which handles the given exception.

    If the exception is not given, the function is going to look for bare
    excepts.
    """
    if get_exception_handlers(node, exception):
        return True
    # Also honour `with contextlib.suppress(exception):` wrappers.
    return any(get_contextlib_suppressors(node, exception))
@lru_cache(maxsize=1024)
def class_is_abstract(node: nodes.ClassDef) -> bool:
    """Return true if the given class node should be considered as an abstract
    class.

    A class is abstract when it is a Protocol, uses ABCMeta / abc.ABC, or
    defines at least one abstract method of its own.
    """
    # Protocol classes are considered "abstract"
    if is_protocol_class(node):
        return True

    # Only check for explicit metaclass=ABCMeta on this specific class
    meta = node.declared_metaclass()
    if meta is not None:
        if meta.name == "ABCMeta" and meta.root().name in ABC_MODULES:
            return True

    for ancestor in node.ancestors():
        if ancestor.name == "ABC" and ancestor.root().name in ABC_MODULES:
            # abc.ABC inheritance
            return True

    for method in node.methods():
        # Only methods defined directly on this class count; inherited
        # abstract methods may already be implemented elsewhere.
        if method.parent.frame() is node:
            if method.is_abstract(pass_is_abstract=False):
                return True
    return False
def _supports_protocol_method(value: nodes.NodeNG, attr: str) -> bool:
    """Return True if *value* appears to provide a usable *attr* method.

    An attribute that is merely assigned a constant (or a container of
    constants) does not count as implementing the protocol.
    """
    try:
        attributes = value.getattr(attr)
    except astroid.NotFoundError:
        return False

    first = attributes[0]

    # Return False if a constant is assigned
    if isinstance(first, nodes.AssignName):
        this_assign_parent = get_node_first_ancestor_of_type(
            first, (nodes.Assign, nodes.NamedExpr)
        )
        if this_assign_parent is None:  # pragma: no cover
            # Cannot imagine this being None, but return True to avoid false positives
            return True
        if isinstance(this_assign_parent.value, nodes.BaseContainer):
            if all(isinstance(n, nodes.Const) for n in this_assign_parent.value.elts):
                return False
        if isinstance(this_assign_parent.value, nodes.Const):
            return False
    return True
def is_comprehension(node: nodes.NodeNG) -> bool:
    """Return True for list/set/dict comprehensions and generator expressions."""
    return isinstance(
        node,
        (nodes.ListComp, nodes.SetComp, nodes.DictComp, nodes.GeneratorExp),
    )
def _supports_mapping_protocol(value: nodes.NodeNG) -> bool:
    # Mappings require both __getitem__ and keys().
    return _supports_protocol_method(
        value, GETITEM_METHOD
    ) and _supports_protocol_method(value, KEYS_METHOD)


def _supports_membership_test_protocol(value: nodes.NodeNG) -> bool:
    # `in` tests use __contains__ (iteration fallback is handled by callers).
    return _supports_protocol_method(value, CONTAINS_METHOD)


def _supports_iteration_protocol(value: nodes.NodeNG) -> bool:
    # Iteration works through __iter__ or the legacy __getitem__ protocol.
    return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
        value, GETITEM_METHOD
    )


def _supports_async_iteration_protocol(value: nodes.NodeNG) -> bool:
    # `async for` requires __aiter__.
    return _supports_protocol_method(value, AITER_METHOD)


def _supports_getitem_protocol(value: nodes.NodeNG) -> bool:
    return _supports_protocol_method(value, GETITEM_METHOD)


def _supports_setitem_protocol(value: nodes.NodeNG) -> bool:
    return _supports_protocol_method(value, SETITEM_METHOD)


def _supports_delitem_protocol(value: nodes.NodeNG) -> bool:
    return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
    """Heuristic: class names like *Mixin, Abstract*, Base* or *Base are
    treated as abstract."""
    lowered = name.lower()
    return lowered.endswith(("mixin", "base")) or lowered.startswith(
        ("abstract", "base")
    )
def is_inside_abstract_class(node: nodes.NodeNG) -> bool:
    """Return True if *node* is (transitively) enclosed by a class that is
    abstract — either structurally (see class_is_abstract) or by naming
    convention (Mixin/Abstract/Base)."""
    while node is not None:
        if isinstance(node, nodes.ClassDef):
            if class_is_abstract(node):
                return True
            name = getattr(node, "name", None)
            if name is not None and _is_abstract_class_name(name):
                return True
        node = node.parent
    return False
def _supports_protocol(
    value: nodes.NodeNG, protocol_callback: Callable[[nodes.NodeNG], bool]
) -> bool:
    """Return True if *value* supports the protocol checked by
    *protocol_callback*, giving the benefit of the doubt when bases are
    unknown or attribute access is dynamic."""
    match value:
        case nodes.ClassDef():
            if not has_known_bases(value):
                return True
            # classobj can only be iterable if it has an iterable metaclass
            meta = value.metaclass()
            if meta is not None:
                if protocol_callback(meta):
                    return True
        case astroid.BaseInstance():
            if not has_known_bases(value):
                return True
            if value.has_dynamic_getattr():
                # __getattr__/__getattribute__ could provide anything.
                return True
            if protocol_callback(value):
                return True
        case nodes.ComprehensionScope():
            return True
        case bases.Proxy(_proxied=astroid.BaseInstance() as p) if has_known_bases(p):
            # Unwrap proxies (e.g. bound methods) and check the proxied instance.
            return protocol_callback(p)
    return False
def is_iterable(value: nodes.NodeNG, check_async: bool = False) -> bool:
    """Return True if *value* supports (a)synchronous iteration."""
    checker = (
        _supports_async_iteration_protocol
        if check_async
        else _supports_iteration_protocol
    )
    return _supports_protocol(value, checker)
def is_mapping(value: nodes.NodeNG) -> bool:
    """Return True if *value* supports the mapping protocol (__getitem__ + keys)."""
    return _supports_protocol(value, _supports_mapping_protocol)


def supports_membership_test(value: nodes.NodeNG) -> bool:
    """Return True if *value* supports `in` tests (__contains__ or iterability)."""
    supported = _supports_protocol(value, _supports_membership_test_protocol)
    return supported or is_iterable(value)
def supports_getitem(value: nodes.NodeNG, node: nodes.NodeNG) -> bool:
    """Return True if *value* supports subscription at *node*'s location."""
    if isinstance(value, nodes.ClassDef):
        # Classes are subscriptable via __class_getitem__ (PEP 560) ...
        if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
            return True
        # ... or unconditionally inside postponed-evaluation annotations.
        if is_postponed_evaluation_enabled(node) and is_node_in_type_annotation_context(
            node
        ):
            return True
    return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: nodes.NodeNG, _: nodes.NodeNG) -> bool:
    """Return True if *value* supports item assignment (__setitem__)."""
    return _supports_protocol(value, _supports_setitem_protocol)


def supports_delitem(value: nodes.NodeNG, _: nodes.NodeNG) -> bool:
    """Return True if *value* supports item deletion (__delitem__)."""
    return _supports_protocol(value, _supports_delitem_protocol)
def _get_python_type_of_node(node: nodes.NodeNG) -> str | None:
    """Return the node's ``pytype()`` result, or None when unavailable."""
    pytype_method: Callable[[], str] | None = getattr(node, "pytype", None)
    if not callable(pytype_method):
        return None
    return pytype_method()
@lru_cache(maxsize=1024)
def safe_infer(
    node: nodes.NodeNG,
    context: InferenceContext | None = None,
    *,
    compare_constants: bool = False,
    compare_constructors: bool = False,
) -> InferenceResult | None:
    """Return the inferred value for the given node.

    Return None if inference failed or if there is some ambiguity (more than
    one node has been inferred of different types).

    If compare_constants is True and if multiple constants are inferred,
    unequal inferred values are also considered ambiguous and return None.

    If compare_constructors is True and if multiple classes are inferred,
    constructors with different signatures are held ambiguous and return None.
    """
    inferred_types: set[str | None] = set()
    try:
        infer_gen = node.infer(context=context)
        value = next(infer_gen)
    except astroid.InferenceError:
        return None
    except Exception as e:  # pragma: no cover
        raise AstroidError from e

    if not isinstance(value, util.UninferableBase):
        inferred_types.add(_get_python_type_of_node(value))

    # pylint: disable = too-many-try-statements
    try:
        # Keep consuming inference results; the first value is returned only
        # if every further result is consistent with it.
        for inferred in infer_gen:
            inferred_type = _get_python_type_of_node(inferred)
            if inferred_type not in inferred_types:
                return None  # If there is ambiguity on the inferred node.
            if (
                compare_constants
                and isinstance(inferred, nodes.Const)
                and isinstance(value, nodes.Const)
                and inferred.value != value.value
            ):
                return None
            if (
                isinstance(inferred, nodes.FunctionDef)
                and isinstance(value, nodes.FunctionDef)
                and function_arguments_are_ambiguous(inferred, value)
            ):
                return None
            if (
                compare_constructors
                and isinstance(inferred, nodes.ClassDef)
                and isinstance(value, nodes.ClassDef)
                and class_constructors_are_ambiguous(inferred, value)
            ):
                return None
    except astroid.InferenceError:
        return None  # There is some kind of ambiguity
    except StopIteration:
        return value
    except Exception as e:  # pragma: no cover
        raise AstroidError from e
    # More than one distinct pytype means the result is ambiguous.
    return value if len(inferred_types) <= 1 else None
@lru_cache(maxsize=512)
def infer_all(
    node: nodes.NodeNG, context: InferenceContext | None = None
) -> list[InferenceResult]:
    """Return every value inferred for *node*, or an empty list on failure."""
    try:
        results = [inferred for inferred in node.infer(context=context)]
    except astroid.InferenceError:
        results = []
    except Exception as e:  # pragma: no cover
        raise AstroidError from e
    return results
def function_arguments_are_ambiguous(
func1: nodes.FunctionDef, func2: nodes.FunctionDef
) -> bool:
if func1.argnames() != func2.argnames():
return True
# Check ambiguity among function default values
pairs_of_defaults = [
(func1.args.defaults, func2.args.defaults),
(func1.args.kw_defaults, func2.args.kw_defaults),
]
for zippable_default in pairs_of_defaults:
if None in zippable_default:
continue
if len(zippable_default[0]) != len(zippable_default[1]):
return True
for default1, default2 in zip(*zippable_default):
match (default1, default2):
case [nodes.Const(), nodes.Const()]:
return default1.value != default2.value # type: ignore[no-any-return]
case [nodes.Name(), nodes.Name()]:
return default1.name != default2.name # type: ignore[no-any-return]
case _:
return True
return False
def class_constructors_are_ambiguous(
    class1: nodes.ClassDef, class2: nodes.ClassDef
) -> bool:
    """Return True when the two classes' ``__init__`` signatures conflict."""
    try:
        init1 = class1.local_attr("__init__")[0]
        init2 = class2.local_attr("__init__")[0]
    except astroid.NotFoundError:
        return False
    if isinstance(init1, nodes.FunctionDef) and isinstance(init2, nodes.FunctionDef):
        return function_arguments_are_ambiguous(init1, init2)
    return False
def has_known_bases(
    klass: nodes.ClassDef, context: InferenceContext | None = None
) -> bool:
    """Return true if all base classes of a class could be inferred."""
    # Result is memoized directly on the class node (side effect) so repeated
    # queries across checkers are cheap.
    try:
        return klass._all_bases_known  # type: ignore[no-any-return]
    except AttributeError:
        pass
    for base in klass.bases:
        result = safe_infer(base, context=context)
        # A base is "unknown" when it does not infer to a class, when it
        # infers to the class itself (cycle), or when its own bases are
        # unknown (checked recursively).
        if (
            not isinstance(result, nodes.ClassDef)
            or result is klass
            or not has_known_bases(result, context=context)
        ):
            klass._all_bases_known = False
            return False
    klass._all_bases_known = True
    return True
def is_none(node: nodes.NodeNG) -> bool:
match node:
case None | nodes.Const(value=None) | nodes.Name(value="None"):
return True
return False
def node_type(node: nodes.NodeNG) -> SuccessfulInferenceResult | None:
    """Return the unique inferred type of *node*, or None.

    None is returned when inference fails, when nothing useful is inferred,
    or when more than one distinct value is possible.
    """
    found: set[SuccessfulInferenceResult] = set()
    try:
        for candidate in node.infer():
            # Skip unusable results: Uninferable and None-like values.
            if isinstance(candidate, util.UninferableBase) or is_none(candidate):
                continue
            found.add(candidate)
            if len(found) > 1:
                return None
    except astroid.InferenceError:
        return None
    if not found:
        return None
    return found.pop()
def is_registered_in_singledispatch_function(node: nodes.FunctionDef) -> bool:
    """Check if the given function node is registered on a singledispatch function.

    True when *node* carries a ``func.register`` decorator (either called,
    for type-annotated registration, or used as a bare attribute) where
    ``func`` is itself decorated with ``functools.singledispatch``.
    """
    singledispatch_qnames = (
        "functools.singledispatch",
        "singledispatch.singledispatch",
    )
    if not isinstance(node, nodes.FunctionDef):
        return False
    decorators = node.decorators.nodes if node.decorators else []
    for decorator in decorators:
        # Consistency: reuse the shared helper (already used by the
        # singledispatchmethod variant) instead of duplicating the
        # register-resolution logic inline; it also goes through safe_infer,
        # which treats ambiguous inference as "unknown" rather than picking
        # the first result.
        func_def = find_inferred_fn_from_register(decorator)
        if func_def is not None:
            return decorated_with(func_def, singledispatch_qnames)
    return False
def find_inferred_fn_from_register(node: nodes.NodeNG) -> nodes.FunctionDef | None:
    """Given a ``func.register`` decorator node, infer and return ``func``.

    Returns None when *node* is not a register call/attribute or when the
    dispatched-on function cannot be inferred.
    """
    # ``func.register`` appears either as a call (type-annotated
    # registration) or as a bare attribute decorator.
    if isinstance(node, nodes.Call):
        func = node.func
    elif isinstance(node, nodes.Attribute):
        func = node
    else:
        return None
    if not isinstance(func, nodes.Attribute) or func.attrname != "register":
        return None
    inferred = safe_infer(func.expr)
    return inferred if isinstance(inferred, nodes.FunctionDef) else None
def is_registered_in_singledispatchmethod_function(node: nodes.FunctionDef) -> bool:
    """Check if the given function node is a singledispatchmethod function."""
    qnames = (
        "functools.singledispatchmethod",
        "singledispatch.singledispatchmethod",
    )
    if not node.decorators:
        return False
    for decorator in node.decorators.nodes:
        registered_on = find_inferred_fn_from_register(decorator)
        if registered_on:
            return decorated_with(registered_on, qnames)
    return False
def get_node_last_lineno(node: nodes.NodeNG) -> int:
    """Get the last lineno of the given node.

    For a simple statement this will just be node.lineno,
    but for a node that has child statements (e.g. a method) this will be the
    lineno of the last child statement recursively.
    """
    # Clause order matters: 'finalbody' is always last in a try statement;
    # 'orelse' is last for if/while/for (and for try when there is no
    # finalbody); 'handlers' is last when neither is present; every compound
    # statement has a 'body'.
    for clause in ("finalbody", "orelse", "handlers", "body"):
        children = getattr(node, clause, None)
        if children:
            return get_node_last_lineno(children[-1])
    # Not a compound statement.
    return node.lineno  # type: ignore[no-any-return]
def is_postponed_evaluation_enabled(node: nodes.NodeNG) -> bool:
    """Check whether PEP 563 postponed evaluation of annotations is enabled."""
    return "annotations" in node.root().future_imports
def is_node_in_type_annotation_context(node: nodes.NodeNG) -> bool:
    """Check if node is in type annotation context.

    Check for 'AnnAssign', function 'Arguments',
    or part of function return type annotation.
    """
    # Walk upwards, remembering the child we came from, so we can tell
    # whether that child occupies an annotation slot of its parent.
    current_node, parent_node = node, node.parent
    while True:
        match parent_node:
            case nodes.AnnAssign(annotation=ann) if ann == current_node:
                return True
            case nodes.Arguments() if current_node in (
                *parent_node.annotations,
                *parent_node.posonlyargs_annotations,
                *parent_node.kwonlyargs_annotations,
                parent_node.varargannotation,
                parent_node.kwargannotation,
            ):
                return True
            case nodes.FunctionDef(returns=ret) if ret == current_node:
                return True
        current_node, parent_node = parent_node, parent_node.parent
        # The enclosing Module is always reached before running out of
        # parents, which terminates the walk.
        if isinstance(parent_node, nodes.Module):
            return False
def is_node_in_pep695_type_context(node: nodes.NodeNG) -> nodes.NodeNG | None:
    """Return the enclosing PEP 695 construct of *node*, if any.

    Looks for a ``TypeAlias`` ancestor or one of the type-parameter nodes
    (``TypeVar``, ``ParamSpec``, ``TypeVarTuple``).
    """
    pep695_nodes = (
        nodes.TypeAlias,
        nodes.TypeVar,
        nodes.ParamSpec,
        nodes.TypeVarTuple,
    )
    return get_node_first_ancestor_of_type(node, pep695_nodes)
def is_subclass_of(child: nodes.ClassDef, parent: nodes.ClassDef) -> bool:
    """Check if first node is a subclass of second node.

    :param child: Node to check for subclass.
    :param parent: Node to check for superclass.

    :returns: True if child is derived from parent. False otherwise.
    """
    if not (isinstance(child, nodes.ClassDef) and isinstance(parent, nodes.ClassDef)):
        return False
    for ancestor in child.ancestors():
        try:
            if astroid.helpers.is_subtype(ancestor, parent):
                return True
        except astroid.exceptions._NonDeducibleTypeHierarchy:
            # Undecidable ancestor relations are simply skipped.
            pass
    return False
@lru_cache(maxsize=1024)
def is_overload_stub(node: nodes.NodeNG) -> bool:
    """Check if a node is a function stub decorated with typing.overload.

    :param node: Node to check.
    :returns: True if node is an overload function stub. False otherwise.
    """
    if not getattr(node, "decorators", None):
        return False
    return bool(decorated_with(node, ["typing.overload", "overload"]))
def is_protocol_class(cls: nodes.NodeNG) -> bool:
    """Check if the given node represents a protocol class.

    :param cls: The node to check
    :returns: True if the node is or inherits from typing.Protocol directly,
        false otherwise.
    """
    if not isinstance(cls, nodes.ClassDef):
        return False
    # The class may itself be one of the typing protocols.
    if cls.qname() in TYPING_PROTOCOLS:
        return True
    # Otherwise look for a base that infers to a typing protocol; inference
    # errors on a given base just move us on to the next one.
    for base_expr in cls.bases:
        try:
            for inferred_base in base_expr.infer():
                if inferred_base.qname() in TYPING_PROTOCOLS:
                    return True
        except astroid.InferenceError:
            continue
    return False
def is_call_of_name(node: nodes.NodeNG, name: str) -> bool:
    """Checks if node is a function call with the given name."""
    return (
        isinstance(node, nodes.Call)
        and isinstance(node.func, nodes.Name)
        and node.func.name == name
    )
def is_test_condition(
    node: nodes.NodeNG,
    parent: nodes.NodeNG | None = None,
) -> bool:
    """Returns true if the given node is being tested for truthiness."""
    if not parent:
        parent = node.parent
    if isinstance(parent, (nodes.While, nodes.If, nodes.IfExp, nodes.Assert)):
        # Either the test expression itself or nested somewhere inside it.
        return node is parent.test or parent.test.parent_of(node)
    if isinstance(parent, nodes.Comprehension):
        return node in parent.ifs
    # Explicit bool(...) conversion also counts as a truth test.
    return is_call_of_name(parent, "bool") and parent.parent_of(node)
def is_classdef_type(node: nodes.ClassDef) -> bool:
    """Test if ClassDef node is Type."""
    if node.name == "type":
        return True
    for base in node.bases:
        if isinstance(base, nodes.Name) and base.name == "type":
            return True
    return False
def is_attribute_typed_annotation(
    node: nodes.ClassDef | astroid.Instance, attr_name: str
) -> bool:
    """Test if attribute is typed annotation in current node
    or any base nodes.
    """
    # Only the first local binding of the attribute is examined; an
    # annotated assignment (``attr: T = ...`` or ``attr: T``) counts.
    match node.locals.get(attr_name, [None])[0]:
        case nodes.AssignName(parent=nodes.AnnAssign()):
            return True
    # Otherwise search the inferred base classes recursively.
    for base in node.bases:
        match inferred := safe_infer(base):
            case nodes.ClassDef() if is_attribute_typed_annotation(inferred, attr_name):
                return True
    return False
def is_enum(node: nodes.ClassDef) -> bool:
    """Return True when *node* is the ``Enum`` class of the ``enum`` module itself."""
    return node.root().name == "enum" and node.name == "Enum"  # type: ignore[no-any-return]
def is_assign_name_annotated_with(node: nodes.AssignName, typing_name: str) -> bool:
    """Test if AssignName node has `typing_name` annotation.

    Especially useful to check for `typing._SpecialForm` instances
    like: `Union`, `Optional`, `Literal`, `ClassVar`, `Final`.
    """
    ann_assign = node.parent
    if not isinstance(ann_assign, nodes.AnnAssign):
        return False
    annotation = ann_assign.annotation
    # For subscripted annotations (e.g. ``ClassVar[int]``) look at the base.
    if isinstance(annotation, nodes.Subscript):
        annotation = annotation.value
    if isinstance(annotation, nodes.Name):
        return annotation.name == typing_name
    if isinstance(annotation, nodes.Attribute):
        return annotation.attrname == typing_name
    return False
def get_iterating_dictionary_name(node: nodes.For | nodes.Comprehension) -> str | None:
    """Get the name of the dictionary which keys are being iterated over on
    a ``nodes.For`` or ``nodes.Comprehension`` node.

    If the iterating object is not either the keys method of a dictionary
    or a dictionary itself, this returns None.
    """
    # Is it a proper keys call?
    match node.iter:
        case nodes.Call(func=nodes.Attribute(attrname="keys")):
            # Only accept a genuinely bound ``keys`` method, not any
            # callable that happens to be named "keys".
            inferred = safe_infer(node.iter.func)
            if not isinstance(inferred, astroid.BoundMethod):
                return None
            # Strip the trailing ".keys" from the call's source text.
            return node.iter.as_string().rpartition(".keys")[0]  # type: ignore[no-any-return]
    # Is it a dictionary?
    if isinstance(node.iter, (nodes.Name, nodes.Attribute)):
        inferred = safe_infer(node.iter)
        if not isinstance(inferred, nodes.Dict):
            return None
        return node.iter.as_string()  # type: ignore[no-any-return]
    return None
def get_subscript_const_value(node: nodes.Subscript) -> nodes.Const:
    """Returns the value 'subscript.slice' of a Subscript node.

    :param node: Subscript Node to extract value from
    :returns: Const Node containing subscript value
    :raises InferredTypeError: if the subscript node cannot be inferred as a Const
    """
    inferred = safe_infer(node.slice)
    if isinstance(inferred, nodes.Const):
        return inferred
    raise InferredTypeError("Subscript.slice cannot be inferred as a nodes.Const")
def get_import_name(importnode: ImportNode, modname: str | None) -> str | None:
    """Get a prepared module name from the given import node.

    In the case of relative imports, this will return the
    absolute qualified module name, which might be useful
    for debugging. Otherwise, the initial module name
    is returned unchanged.

    :param importnode: node representing import statement.
    :param modname: module name from import statement.

    :returns: absolute qualified module name of the module
        used in import.
    """
    # Only relative ``from`` imports need rewriting.
    if not (isinstance(importnode, nodes.ImportFrom) and importnode.level):
        return modname
    root = importnode.root()
    if not isinstance(root, nodes.Module):
        return modname
    try:
        return root.relative_to_absolute_name(  # type: ignore[no-any-return]
            modname, level=importnode.level
        )
    except TooManyLevelsError:
        return modname
def is_sys_guard(node: nodes.If) -> bool:
    """Return True if IF stmt is a sys.version_info guard.

    >>> import sys
    >>> if sys.version_info >= (3, 8):
    ...     from typing import Literal
    ... else:
    ...     from typing_extensions import Literal
    """
    if isinstance(node.test, nodes.Compare):
        value = node.test.left
        # Allow subscripted comparisons such as ``sys.version_info[0] >= 3``.
        if isinstance(value, nodes.Subscript):
            value = value.value
        if (
            isinstance(value, nodes.Attribute)
            and value.as_string() == "sys.version_info"
        ):
            return True
    # ``six.PY2`` / ``six.PY3`` used directly as the test expression also
    # count as version guards.
    elif isinstance(node.test, nodes.Attribute) and node.test.as_string() in {
        "six.PY2",
        "six.PY3",
    }:
        return True
    return False
def _is_node_in_same_scope(
    candidate: nodes.NodeNG, node_scope: nodes.LocalsDictNodeNG
) -> bool:
    """Return True when *candidate* lives directly in *node_scope*.

    Class and function definitions belong to their parent's scope, while
    other nodes belong to the scope they report themselves.
    """
    if not isinstance(candidate, (nodes.ClassDef, nodes.FunctionDef)):
        return candidate.scope() is node_scope
    enclosing = candidate.parent
    return enclosing is not None and enclosing.scope() is node_scope
def _is_reassigned_relative_to_current(
    node: nodes.NodeNG, varname: str, before: bool
) -> bool:
    """Check if the given variable name is reassigned in the same scope relative to
    the current node.

    With ``before=True`` only bindings on earlier lines count, otherwise
    only bindings on later lines.
    """
    scope = node.scope()
    current_lineno = node.lineno
    if current_lineno is None:
        return False
    binding_types = (nodes.AssignName, nodes.ClassDef, nodes.FunctionDef)
    for candidate in scope.nodes_of_class(binding_types):
        if candidate.name != varname or candidate.lineno is None:
            continue
        in_range = (
            candidate.lineno < current_lineno
            if before
            else candidate.lineno > current_lineno
        )
        if in_range and _is_node_in_same_scope(candidate, scope):
            return True
    return False
def is_reassigned_before_current(node: nodes.NodeNG, varname: str) -> bool:
    """Return True when *varname* is re-bound earlier in *node*'s scope."""
    return _is_reassigned_relative_to_current(node, varname, before=True)
def is_reassigned_after_current(node: nodes.NodeNG, varname: str) -> bool:
    """Return True when *varname* is re-bound later in *node*'s scope."""
    return _is_reassigned_relative_to_current(node, varname, before=False)
def is_deleted_after_current(node: nodes.NodeNG, varname: str) -> bool:
    """Check if the given variable name is deleted in the same scope after the current
    node.
    """
    node_lineno = node.lineno
    if node_lineno is None:
        # Consistent with _is_reassigned_relative_to_current: without a
        # source position the nodes cannot be ordered (previously this
        # raised TypeError on ``None > None`` comparisons for synthetic
        # nodes).
        return False
    return any(
        getattr(target, "name", None) == varname
        and target.lineno is not None
        and target.lineno > node_lineno
        for del_node in node.scope().nodes_of_class(nodes.Delete)
        for target in del_node.targets
    )
def is_function_body_ellipsis(node: nodes.FunctionDef) -> bool:
    """Checks whether a function body only consists of a single Ellipsis."""
    if len(node.body) != 1:
        return False
    only_stmt = node.body[0]
    return (
        isinstance(only_stmt, nodes.Expr)
        and isinstance(only_stmt.value, nodes.Const)
        and only_stmt.value.value is Ellipsis
    )
def is_base_container(node: nodes.NodeNG | None) -> bool:
    """Return True when *node* is a container literal with no elements."""
    if not isinstance(node, nodes.BaseContainer):
        return False
    return not node.elts
def is_empty_dict_literal(node: nodes.NodeNG | None) -> bool:
    """Return True when *node* is a dict literal without items."""
    if not isinstance(node, nodes.Dict):
        return False
    return not node.items
def is_empty_str_literal(node: nodes.NodeNG | None) -> bool:
    """Return True when *node* is a constant empty string."""
    if not (isinstance(node, nodes.Const) and isinstance(node.value, str)):
        return False
    return not node.value
def returns_bool(node: nodes.NodeNG) -> bool:
    """Returns true if a node is a nodes.Return that returns a constant boolean."""
    return (
        isinstance(node, nodes.Return)
        and isinstance(node.value, nodes.Const)
        and isinstance(node.value.value, bool)
    )
def assigned_bool(node: nodes.NodeNG) -> bool:
    """Returns true if a node is a nodes.Assign whose value is a constant boolean."""
    return (
        isinstance(node, nodes.Assign)
        and isinstance(node.value, nodes.Const)
        and isinstance(node.value.value, bool)
    )
def get_node_first_ancestor_of_type(
    node: nodes.NodeNG, ancestor_type: type[_NodeT] | tuple[type[_NodeT], ...]
) -> _NodeT | None:
    """Return the first parent node that is any of the provided types (or None)."""
    matching = (
        ancestor
        for ancestor in node.node_ancestors()
        if isinstance(ancestor, ancestor_type)
    )
    return next(matching, None)  # type: ignore[return-value]
def get_node_first_ancestor_of_type_and_its_child(
    node: nodes.NodeNG, ancestor_type: type[_NodeT] | tuple[type[_NodeT], ...]
) -> tuple[None, None] | tuple[_NodeT, nodes.NodeNG]:
    """Modified version of get_node_first_ancestor_of_type to also return the
    descendant visited directly before reaching the sought ancestor.

    Useful for extracting whether a statement is guarded by a try, except, or finally
    when searching for a Try ancestor.
    """
    previous = node
    for current in node.node_ancestors():
        if isinstance(current, ancestor_type):
            return (current, previous)
        previous = current
    return None, None
def in_type_checking_block(node: nodes.NodeNG) -> bool:
    """Check if a node is guarded by a TYPE_CHECKING guard."""
    for ancestor in node.node_ancestors():
        if not isinstance(ancestor, nodes.If):
            continue
        if isinstance(ancestor.test, nodes.Name):
            # Bare-name form: ``if TYPE_CHECKING:``
            if ancestor.test.name != "TYPE_CHECKING":
                continue
            lookup_result = ancestor.test.lookup(ancestor.test.name)[1]
            if not lookup_result:
                return False
            maybe_import_from = lookup_result[0]
            if (
                isinstance(maybe_import_from, nodes.ImportFrom)
                and maybe_import_from.modname == "typing"
            ):
                return True
            # A user-defined TYPE_CHECKING constant that infers to the
            # literal False behaves like typing's flag.
            match safe_infer(ancestor.test):
                case nodes.Const(value=False):
                    return True
        elif isinstance(ancestor.test, nodes.Attribute):
            # Attribute form: ``if typing.TYPE_CHECKING:``
            if ancestor.test.attrname != "TYPE_CHECKING":
                continue
            match safe_infer(ancestor.test.expr):
                case nodes.Module(name="typing"):
                    return True
    return False
def is_typing_member(node: nodes.NodeNG, names_to_check: tuple[str, ...]) -> bool:
    """Check if `node` is a member of the `typing` module and has one of the names from
    `names_to_check`.
    """
    match node:
        case nodes.Name():
            # ``from typing import X`` (possibly aliased with ``as``).
            try:
                import_from = node.lookup(node.name)[1][0]
            except IndexError:
                return False
            match import_from:
                case nodes.ImportFrom(modname="typing"):
                    # real_name resolves an alias back to the typing name.
                    return import_from.real_name(node.name) in names_to_check
            return False
        case nodes.Attribute():
            # ``typing.X`` attribute access on the typing module.
            match safe_infer(node.expr):
                case nodes.Module(name="typing"):
                    return node.attrname in names_to_check
            return False
    return False
@lru_cache
def in_for_else_branch(parent: nodes.NodeNG, stmt: Statement) -> bool:
    """Returns True if stmt is inside the else branch for a parent For stmt."""
    if not isinstance(parent, nodes.For):
        return False
    return any(
        else_stmt.parent_of(stmt) or else_stmt == stmt for else_stmt in parent.orelse
    )
def find_assigned_names_recursive(
    target: nodes.AssignName | nodes.BaseContainer,
) -> Iterator[str]:
    """Yield the names of assignment targets, accounting for nested ones."""
    if isinstance(target, nodes.AssignName):
        if target.name is not None:
            yield target.name
    elif isinstance(target, nodes.BaseContainer):
        # Tuple/list targets may nest arbitrarily deep.
        for element in target.elts:
            yield from find_assigned_names_recursive(element)
def has_starred_node_recursive(
    node: nodes.For | nodes.Comprehension | nodes.Set | nodes.Starred,
) -> Iterator[bool]:
    """Yield ``True`` if a Starred node is found recursively."""
    match node:
        case nodes.Starred():
            yield True
        case nodes.Set():
            for elt in node.elts:
                yield from has_starred_node_recursive(elt)
        case nodes.For() | nodes.Comprehension():
            # NOTE(review): assumes ``node.iter`` is a container literal
            # exposing ``elts`` — confirm callers never pass a loop whose
            # iterable is a plain name or call.
            for elt in node.iter.elts:
                yield from has_starred_node_recursive(elt)
def is_hashable(node: nodes.NodeNG) -> bool:
    """Return whether any inferred value of `node` is hashable.

    When finding ambiguity, return True.
    """
    # pylint: disable = too-many-try-statements
    try:
        for inferred in node.infer():
            if isinstance(inferred, (nodes.ClassDef, util.UninferableBase)):
                return True
            if not hasattr(inferred, "igetattr"):
                return True
            hash_fn = next(inferred.igetattr("__hash__"))
            # __hash__ defined on the value itself counts as hashable.
            if hash_fn.parent is inferred:
                return True
            # ``__hash__ = None`` is the canonical unhashable marker.
            if getattr(hash_fn, "value", True) is not None:
                return True
        # BUGFIX: this return was previously inside the loop, so only the
        # FIRST inferred value was examined (contradicting the "any inferred
        # value" contract), and an empty inference result fell off the end
        # of the function returning None instead of a bool.
        return False
    except astroid.InferenceError:
        return True
def subscript_chain_is_equal(left: nodes.Subscript, right: nodes.Subscript) -> bool:
    """Return True when two subscript chains (e.g. ``a[1][2]`` vs ``b[1][2]``)
    use equal constant keys and textually identical base expressions.
    """
    while isinstance(left, nodes.Subscript) and isinstance(right, nodes.Subscript):
        try:
            left_key = get_subscript_const_value(left).value
            right_key = get_subscript_const_value(right).value
        except InferredTypeError:
            # A non-constant subscript makes the chains incomparable.
            return False
        if left_key != right_key:
            return False
        left, right = left.value, right.value
    return left.as_string() == right.as_string()  # type: ignore[no-any-return]
def _is_target_name_in_binop_side(
    target: nodes.AssignName | nodes.AssignAttr, side: nodes.NodeNG | None
) -> bool:
    """Determine whether the target name-like node is referenced in the side node."""
    if isinstance(side, nodes.Name) and isinstance(target, nodes.AssignName):
        return target.name == side.name  # type: ignore[no-any-return]
    if isinstance(side, nodes.Attribute) and isinstance(target, nodes.AssignAttr):
        return target.as_string() == side.as_string()  # type: ignore[no-any-return]
    if isinstance(side, nodes.Subscript) and isinstance(target, nodes.Subscript):
        return subscript_chain_is_equal(target, side)
    return False
def is_augmented_assign(node: nodes.Assign) -> tuple[bool, str]:
    """Determine if the node is assigning itself (with modifications) to itself.

    For example: x = 1 + x

    Returns a ``(is_augmented, operator)`` pair; the operator (e.g. ``+``)
    is empty when the assignment is not augmented.
    """
    if not isinstance(node.value, nodes.BinOp):
        return False, ""
    binop = node.value
    target = node.targets[0]
    if not isinstance(target, (nodes.AssignName, nodes.AssignAttr, nodes.Subscript)):
        return False, ""
    # We don't want to catch x = "1" + x or x = "%s" % x
    if isinstance(binop.left, nodes.Const) and isinstance(
        binop.left.value, (str, bytes)
    ):
        return False, ""
    # This could probably be improved but for now we disregard all assignments from calls
    if isinstance(binop.left, nodes.Call) or isinstance(binop.right, nodes.Call):
        return False, ""
    if _is_target_name_in_binop_side(target, binop.left):
        return True, binop.op
    if (
        # Unless an operator is commutative, we should not raise (i.e. x = 3/x)
        binop.op in COMMUTATIVE_OPERATORS
        and _is_target_name_in_binop_side(target, binop.right)
    ):
        if isinstance(binop.left, nodes.Const):
            # This bizarrely became necessary after an unrelated call to igetattr().
            # Seems like a code smell uncovered in #10212.
            # tuple(node.frame().igetattr(node.name))
            inferred_left = binop.left
        else:
            inferred_left = safe_infer(binop.left)
        # Only integer constants on the other side are considered safe for
        # the augmented form.
        match inferred_left:
            case nodes.Const(value=int()):
                return True, binop.op
        return False, ""
    return False, ""
def _qualified_name_parts(qualified_module_name: str) -> list[str]:
"""Split the names of the given module into subparts.
For example,
_qualified_name_parts('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = qualified_module_name.split(".")
return [".".join(names[0 : i + 1]) for i in range(len(names))]
def is_module_ignored(
    qualified_module_name: str, ignored_modules: Iterable[str]
) -> bool:
    """Check whether a module (or any of its parent packages) is ignored.

    Entries in *ignored_modules* may be exact qualified names or
    fnmatch-style patterns (e.g. ``foo.*``).
    """
    ignored = set(ignored_modules)
    for current_module in _qualified_name_parts(qualified_module_name):
        # Exact match first (cheap set lookup), then glob patterns.
        if current_module in ignored:
            return True
        if any(fnmatch.fnmatch(current_module, pattern) for pattern in ignored):
            return True
    return False
def is_singleton_const(node: nodes.NodeNG) -> bool:
    """Return True when *node* is a Const holding one of the singleton values
    (compared by identity)."""
    if not isinstance(node, nodes.Const):
        return False
    return any(node.value is singleton for singleton in SINGLETON_VALUES)
def is_terminating_func(node: nodes.Call) -> bool:
    """Detect call to exit(), quit(), os._exit(), sys.exit(), or
    functions annotated with `typing.NoReturn` or `typing.Never`.
    """
    # Calls inside a lambda body do not terminate the surrounding function.
    if not isinstance(node.func, (nodes.Attribute, nodes.Name)) or isinstance(
        node.parent, nodes.Lambda
    ):
        return False
    try:
        for inferred in node.func.infer():
            # Known terminating callables, identified by qualified name.
            if (
                hasattr(inferred, "qname")
                and inferred.qname() in TERMINATING_FUNCS_QNAMES
            ):
                return True
            match inferred:
                case astroid.BoundMethod(_proxied=astroid.UnboundMethod(_proxied=p)):
                    # Unwrap to get the actual function node object
                    inferred = p
            # Otherwise look for an explicit NoReturn/Never return
            # annotation; async functions only terminate when awaited.
            if (  # pylint: disable=too-many-boolean-expressions
                isinstance(inferred, nodes.FunctionDef)
                and (
                    not isinstance(inferred, nodes.AsyncFunctionDef)
                    or isinstance(node.parent, nodes.Await)
                )
                and isinstance(inferred.returns, nodes.Name)
                and (inferred_func := safe_infer(inferred.returns))
                and hasattr(inferred_func, "qname")
                and inferred_func.qname()
                in (
                    *TYPING_NEVER,
                    *TYPING_NORETURN,
                    # In Python 3.7 - 3.8, NoReturn is alias of '_SpecialForm'
                    # "typing._SpecialForm",
                    # But 'typing.Any' also inherits _SpecialForm
                    # See #9751
                )
            ):
                return True
    except (StopIteration, astroid.InferenceError):
        pass
    return False
def is_class_attr(name: str, klass: nodes.ClassDef) -> bool:
    """Return True when *name* resolves to an attribute on *klass*."""
    try:
        klass.getattr(name)
    except astroid.NotFoundError:
        return False
    return True
def get_inverse_comparator(op: str) -> str:
    """Returns the inverse comparator given a comparator.

    E.g. when given "==", returns "!="

    :param str op: the comparator to look up.

    :returns: The inverse of the comparator in string format
    :raises KeyError: if input is not recognized as a comparator
    """
    # Each pair is symmetric, so a single table of five pairs covers all
    # ten comparators in both directions.
    inverse_pairs = (
        ("==", "!="),
        ("<", ">="),
        (">", "<="),
        ("in", "not in"),
        ("is", "is not"),
    )
    for first, second in inverse_pairs:
        if op == first:
            return second
        if op == second:
            return first
    raise KeyError(op)
def not_condition_as_string(
    test_node: nodes.Compare | nodes.Name | nodes.UnaryOp | nodes.BoolOp | nodes.BinOp,
) -> str:
    """Return the source text of the logical negation of *test_node*.

    Single comparisons are rewritten with the inverse operator; boolean
    expressions are wrapped as ``not (...)``; everything else is prefixed
    with ``not``.
    """
    match test_node:
        case nodes.UnaryOp():
            # NOTE(review): assumes the unary operator is ``not`` (so the
            # negation is just the operand) — confirm callers never pass
            # e.g. ``-x``.
            return test_node.operand.as_string()  # type: ignore[no-any-return]
        case nodes.BoolOp():
            return f"not ({test_node.as_string()})"
        case nodes.Compare():
            lhs = test_node.left
            ops, rhs = test_node.ops[0]
            # Operands binding less tightly than a comparison must keep
            # their parentheses for the rewritten text to stay equivalent.
            lower_priority_expressions = (
                nodes.Lambda,
                nodes.UnaryOp,
                nodes.BoolOp,
                nodes.IfExp,
                nodes.NamedExpr,
            )
            lhs = (
                f"({lhs.as_string()})"
                if isinstance(lhs, lower_priority_expressions)
                else lhs.as_string()
            )
            rhs = (
                f"({rhs.as_string()})"
                if isinstance(rhs, lower_priority_expressions)
                else rhs.as_string()
            )
            return f"{lhs} {get_inverse_comparator(ops)} {rhs}"
        case _:
            return f"not {test_node.as_string()}"
@lru_cache(maxsize=1000)
def overridden_method(
    klass: nodes.LocalsDictNodeNG, name: str | None
) -> nodes.FunctionDef | None:
    """Get overridden method if any.

    Returns the ``FunctionDef`` named *name* defined by the nearest
    ancestor of *klass*, or None when no ancestor defines it.
    """
    try:
        # First ancestor that defines ``name`` locally.
        parent = next(klass.local_attr_ancestors(name))
    except (StopIteration, KeyError):
        return None
    try:
        meth_node = parent[name]
    except KeyError:  # pragma: no cover
        # We have found an ancestor defining <name> but it's not in the local
        # dictionary. This may happen with astroid built from living objects.
        return None
    if isinstance(meth_node, nodes.FunctionDef):
        return meth_node
    return None  # pragma: no cover
def is_enum_member(node: nodes.AssignName) -> bool:
    """Return `True` if `node` is an Enum member (is an item of the
    `__members__` container).
    """
    frame = node.frame()
    # Only names bound directly inside an Enum subclass can be members;
    # classes defined in the enum module itself are excluded.
    if (
        not isinstance(frame, nodes.ClassDef)
        or not frame.is_subtype_of("enum.Enum")
        or frame.root().qname() == "enum"
    ):
        return False
    members = frame.locals.get("__members__")
    # A dataclass is one known case for when `members` can be `None`
    if members is None:
        return False
    # NOTE(review): assumes ``members[0]`` is a Dict node whose values are
    # name-like nodes with a ``.name`` — confirm against astroid's Enum brain.
    return node.name in [name_obj.name for value, name_obj in members[0].items]
|
UnsupportedFormatCharacter
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_xy.py
|
{
"start": 6166,
"end": 6776
}
|
class ____(scale_continuous[None]):
"""
Base class for continuous position scales
"""
guide: None = None
def map(self, x, limits=None):
# Position aesthetics don't map, because the coordinate
# system takes care of it.
# But the continuous scale has to deal with out of bound points
if not len(x):
return x
if limits is None:
limits = self.final_limits
scaled = self.oob(x, limits) # type: ignore
scaled[pd.isna(scaled)] = self.na_value
return scaled
@dataclass(kw_only=True)
|
scale_position_continuous
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/selectable.py
|
{
"start": 103692,
"end": 103991
}
|
class ____(FromGrouping, NamedFromClause):
"""represent a grouping of a named FROM clause
.. versionadded:: 2.0
"""
inherit_cache = True
if TYPE_CHECKING:
def self_group(
self, against: Optional[OperatorType] = None
) -> Self: ...
|
NamedFromGrouping
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/exceptions.py
|
{
"start": 1524,
"end": 1903
}
|
class ____(ArtifactStatusError):
"""Raised for Artifact methods or attributes that can't be changed after logging."""
def __init__(self, fullname: str, obj: ArtifactT):
*_, name = fullname.split(".")
msg = f"{fullname!r} used on logged artifact. Can't modify finalized artifact."
super().__init__(msg=msg, name=name, obj=obj)
|
ArtifactFinalizedError
|
python
|
walkccc__LeetCode
|
solutions/3365. Rearrange K Substrings to Form Target String/3365.py
|
{
"start": 0,
"end": 258
}
|
class ____:
def isPossibleToRearrange(self, s: str, t: str, k: int) -> bool:
n = len(s)
return (collections.Counter(s[i:i + n // k] for i in range(0, n, n // k)) ==
collections.Counter(t[i:i + n // k] for i in range(0, n, n // k)))
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/linked-list-cycle.py
|
{
"start": 127,
"end": 431
}
|
class ____(object):
# @param head, a ListNode
# @return a boolean
def hasCycle(self, head):
fast, slow = head, head
while fast and fast.next:
fast, slow = fast.next.next, slow.next
if fast is slow:
return True
return False
|
Solution
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/pure_eval.py
|
{
"start": 794,
"end": 4605
}
|
class ____(Integration):
identifier = "pure_eval"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def add_executing_info(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if sentry_sdk.get_client().get_integration(PureEvalIntegration) is None:
return event
if hint is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exception = event.get("exception", None)
if exception is None:
return event
values = exception.get("values", None)
if values is None:
return event
for exception, (_exc_type, _exc_value, exc_tb) in zip(
reversed(values), walk_exception_chain(exc_info)
):
sentry_frames = [
frame
for frame in exception.get("stacktrace", {}).get("frames", [])
if frame.get("function")
]
tbs = list(iter_stacks(exc_tb))
if len(sentry_frames) != len(tbs):
continue
for sentry_frame, tb in zip(sentry_frames, tbs):
sentry_frame["vars"] = (
pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
)
return event
def pure_eval_frame(frame):
# type: (FrameType) -> Dict[str, Any]
source = executing.Source.for_frame(frame)
if not source.tree:
return {}
statements = source.statements_at_line(frame.f_lineno)
if not statements:
return {}
scope = stmt = list(statements)[0]
while True:
# Get the parent first in case the original statement is already
# a function definition, e.g. if we're calling a decorator
# In that case we still want the surrounding scope, not that function
scope = scope.parent
if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):
break
evaluator = pure_eval.Evaluator.from_frame(frame)
expressions = evaluator.interesting_expressions_grouped(scope)
def closeness(expression):
# type: (Tuple[List[Any], Any]) -> Tuple[int, int]
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
def start(n):
# type: (ast.expr) -> Tuple[int, int]
return (n.lineno, n.col_offset)
nodes_before_stmt = [
node
for node in nodes
if start(node) < stmt.last_token.end # type: ignore
]
if nodes_before_stmt:
# The position of the last node before or in the statement
return max(start(node) for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
lineno, col_offset = min(start(node) for node in nodes)
return (-lineno, -col_offset)
# This adds the first_token and last_token attributes to nodes
atok = source.asttokens()
expressions.sort(key=closeness, reverse=True)
vars = {
atok.get_text(nodes[0]): value
for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]
}
return serializer.serialize(vars, is_vars=True)
|
PureEvalIntegration
|
python
|
walkccc__LeetCode
|
solutions/2582. Pass the Pillow/2582.py
|
{
"start": 0,
"end": 249
}
|
class ____:
def passThePillow(self, n: int, time: int) -> int:
# Repeat every (n - 1) * 2 seconds.
time %= (n - 1) * 2
if time < n: # Go forward from 1.
return 1 + time
return n - (time - (n - 1)) # Go backward from n.
|
Solution
|
python
|
huggingface__transformers
|
tests/models/mbart/test_tokenization_mbart.py
|
{
"start": 2974,
"end": 8726
}
|
class ____(unittest.TestCase):
checkpoint_name = "facebook/mbart-large-en-ro"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def setUpClass(cls):
cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
)
cls.pad_token_id = 1
return cls
def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
def test_enro_tokenizer_batch_encode_plus(self):
ids = self.tokenizer(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, ids)
def test_enro_tokenizer_decode_ignores_language_codes(self):
self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
self.assertEqual(result, expected_romanian)
self.assertNotIn(self.tokenizer.eos_token, result)
def test_enro_tokenizer_truncation(self):
src_text = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], str)
desired_max_length = 10
ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
self.assertEqual(ids[-2], 2)
self.assertEqual(ids[-1], EN_CODE)
self.assertEqual(len(ids), desired_max_length)
def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
def test_special_tokens_unaffacted_by_save_load(self):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname)
new_tok = MBartTokenizer.from_pretrained(tmpdirname)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def test_batch_fairseq_parity(self):
batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def test_enro_tokenizer_prepare_batch(self):
batch = self.tokenizer(
self.src_text,
text_target=self.tgt_text,
padding=True,
truncation=True,
max_length=len(self.expected_src_tokens),
return_tensors="pt",
)
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
def test_seq2seq_max_length(self):
batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
targets = self.tokenizer(
text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
)
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def test_tokenizer_translation(self):
inputs = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
)
self.assertEqual(
nested_simplify(inputs),
{
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
},
)
|
MBartEnroIntegrationTest
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/lite_v2_test.py
|
{
"start": 202167,
"end": 205520
}
|
class ____(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testCOncreteFunctionFloat(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1.0, shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func], root
)
converter._experimental_use_buffer_offset = True
tflite_model = converter.convert()
# Check output value from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testConcreteFunctionStringInput(self):
class Model(tf.Module):
@tf.function
def __call__(self, x):
return x
root = Model()
concrete_func = root.__call__.get_concrete_function(
tf.constant([str(x) for x in range(11)])
)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func], root
)
converter._experimental_use_buffer_offset = True
tflite_model = converter.convert()
input_data = tf.constant(
[str(x) for x in range(11)], shape=(11,), dtype=tf.dtypes.string
)
# Check values from converted model.
interp = interpreter.Interpreter(model_content=tflite_model)
interp.allocate_tensors()
my_signature = interp.get_signature_runner()
with self.assertRaises(ValueError) as error:
_ = my_signature(x=input_data)
self.assertIn(
'Passed in value type is not a numpy array, got type ',
str(error.exception),
)
@test_util.run_v2_only
def testSavedModelSignatureDefs(self):
"""Test converting SignatureDef is correct and uses SignatureDef API."""
root = self._getMultiFunctionModel()
input_data_0 = tf.constant(1.0, shape=[1])
input_data_1 = tf.constant(3.0, shape=[1])
mul_add_func = root.mul_add.get_concrete_function(
input_data_1, input_data_0
)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save.save(root, save_dir, {'mul_add': mul_add_func})
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['mul_add']
)
converter._experimental_use_buffer_offset = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.mul_add(input_data_1, input_data_0)
interp = interpreter.Interpreter(model_content=tflite_model)
signature_defs = interp.get_signature_list()
results = self._evaluateTFLiteModelUsingSignatureDef(
tflite_model, 'mul_add', {'y': input_data_0, 'x': input_data_1}
)
self.assertEqual(list(results.keys()), ['output_0'])
self.assertEqual(expected_value.numpy(), results['output_0'])
# Verify the SignatureDef structure returned is as expected.
self.assertLen(signature_defs, 1)
self.assertEqual(list(signature_defs.keys()), ['mul_add'])
self.assertLen(signature_defs.values(), 1)
self.assertEqual(
list(signature_defs['mul_add'].keys()), ['inputs', 'outputs']
)
self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])
self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])
|
BufferOffsetTest
|
python
|
facebook__pyre-check
|
client/coverage_data.py
|
{
"start": 1928,
"end": 2131
}
|
class ____(json_mixins.SnakeCaseAndExcludeJsonMixin):
name: str
is_annotated: bool
location: Location
contains_explicit_any: bool
@dataclasses.dataclass(frozen=True)
|
ParameterAnnotationInfo
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/test_validators.py
|
{
"start": 2129,
"end": 2563
}
|
class ____(BaseDataSourceValidator[MockModel]):
field1 = serializers.CharField()
field2 = serializers.IntegerField()
data_source_type_handler = QuerySubscriptionDataSourceHandler
class Meta:
model = MockModel
fields = [
"field1",
"field2",
]
def create_source(self, validated_data: Any) -> MockModel:
return MockModel.objects.create()
|
MockDataSourceValidator
|
python
|
eth-brownie__brownie
|
brownie/convert/datatypes.py
|
{
"start": 10660,
"end": 15996
}
|
class ____(tuple):
"""Tuple subclass with dict-like functionality, used for iterable return values."""
_abi: Optional[List[ABIComponent]] = None
_dict: Dict[str, Any] = {}
def __new__(
cls,
values: Iterable[Any],
abi: Optional[Sequence[ABIComponent]] = None,
) -> "ReturnValue":
values = list(values)
for i, value in enumerate(values):
if isinstance(value, (tuple, list)) and not isinstance(value, ReturnValue):
if abi is not None and "components" in (value_abi := abi[i]):
if value_abi["type"] == "tuple":
# tuple
values[i] = ReturnValue(value, value_abi["components"])
else:
# array of tuples
inner_abi = value_abi.copy()
length = len(value)
inner_abi["type"] = inner_abi["type"].rsplit("[", maxsplit=1)[0]
final_abi = [deepcopy(inner_abi) for i in range(length)]
if inner_abi.get("name"):
name = inner_abi["name"]
for x in range(length):
final_abi[x]["name"] = f"{name}[{x}]"
values[i] = ReturnValue(value, final_abi)
else:
# array
values[i] = ReturnValue(value)
self = tuple.__new__(cls, values)
self._abi = list(abi) if abi else []
self._dict = {i.get("name", "") or f"arg[{c}]": values[c] for c, i in enumerate(self._abi)}
return self
def __hash__(self) -> int:
return super().__hash__()
def __eq__(self, other: Any) -> bool:
return _kwargtuple_compare(self, other)
def __ne__(self, other: Any) -> bool:
return not _kwargtuple_compare(self, other)
@overload # type: ignore [override]
def __getitem__(self, key: int) -> Any: ...
@overload
def __getitem__(self, key: str) -> Any: ...
@overload
def __getitem__(
self,
key: "slice[Optional[int], Optional[int], Optional[int]]",
) -> "ReturnValue": ...
def __getitem__(
self,
key: Union[str, int, "slice[Optional[int], Optional[int], Optional[int]]"],
) -> Any:
if type(key) is slice:
abi = self._abi
result = super().__getitem__(key)
if abi is None:
return ReturnValue(result)
item_abi = deepcopy(abi)[key]
return ReturnValue(result, item_abi)
if isinstance(key, int):
return super().__getitem__(key)
return self._dict[key]
def __contains__(self, value: Any) -> bool:
return self.count(value) > 0
def count(self, value: Any) -> int:
"""ReturnValue.count(value) -> integer -- return number of occurrences of value"""
count = 0
for item in self:
try:
if _kwargtuple_compare(item, value):
count += 1
except TypeError:
continue
return count
def dict(self) -> Dict[str, Any]:
"""ReturnValue.dict() -> a dictionary of ReturnValue's named items"""
response = {}
for k, v in self._dict.items():
if isinstance(v, ReturnValue) and v._abi:
response[k] = v.dict()
else:
response[k] = v
return response
def index(self, value: Any, start: int = 0, stop: Any = None) -> int: # type: ignore [override]
"""ReturnValue.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present."""
if stop is None:
stop = len(self)
for i in range(start, stop):
try:
if _kwargtuple_compare(self[i], value):
return i
except TypeError:
continue
raise ValueError(f"{value} is not in ReturnValue")
def items(self) -> ItemsView[str, Any]:
"""ReturnValue.items() -> a set-like object providing a view on ReturnValue's named items"""
return self._dict.items()
def keys(self) -> KeysView[str]:
"""ReturnValue.keys() -> a set-like object providing a view on ReturnValue's keys"""
return self._dict.keys()
def _kwargtuple_compare(a: Any, b: Any) -> bool:
if not isinstance(a, (tuple, list, ReturnValue)):
types_ = {type(a), type(b)}
if types_.intersection((bool, type(None))):
return a is b
if types_.intersection((dict, EthAddress, HexString)):
return a == b
return _convert_str(a) == _convert_str(b)
if not isinstance(b, (tuple, list, ReturnValue)) or len(b) != len(a):
return False
return all(_kwargtuple_compare(ai, bi) for ai, bi in zip(a, b))
def _convert_str(value: Any) -> Wei:
if not isinstance(value, str):
if not hasattr(value, "address"):
return value
value = value.address
if value.startswith("0x"):
return "0x" + value.lstrip("0x").lower()
if value.count(" ") != 1:
return value
try:
return Wei(value)
except (ValueError, TypeError):
return value
|
ReturnValue
|
python
|
scipy__scipy
|
scipy/sparse/tests/test_minmax1d.py
|
{
"start": 652,
"end": 2643
}
|
class ____:
def test_minmax(self, spcreator):
D = np.arange(5)
X = spcreator(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 4)
assert_equal((-X).min(), -4)
assert_equal((-X).max(), 0)
def test_minmax_axis(self, spcreator):
D = np.arange(50)
X = spcreator(D)
for axis in [0, -1]:
assert_array_equal(
toarray(X.max(axis=axis)), D.max(axis=axis, keepdims=True)
)
assert_array_equal(
toarray(X.min(axis=axis)), D.min(axis=axis, keepdims=True)
)
for axis in [-2, 1]:
with pytest.raises(ValueError, match="axis out of range"):
X.min(axis=axis)
with pytest.raises(ValueError, match="axis out of range"):
X.max(axis=axis)
def test_numpy_minmax(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
assert_array_equal(np.min(datsp), np.min(dat))
assert_array_equal(np.max(datsp), np.max(dat))
def test_argmax(self, spcreator):
D1 = np.array([-1, 5, 2, 3])
D2 = np.array([0, 0, -1, -2])
D3 = np.array([-1, -2, -3, -4])
D4 = np.array([1, 2, 3, 4])
D5 = np.array([1, 2, 0, 0])
for D in [D1, D2, D3, D4, D5]:
mat = spcreator(D)
assert_equal(mat.argmax(), np.argmax(D))
assert_equal(mat.argmin(), np.argmin(D))
assert_equal(mat.argmax(axis=0), np.argmax(D, axis=0))
assert_equal(mat.argmin(axis=0), np.argmin(D, axis=0))
D6 = np.empty((0,))
for axis in [None, 0]:
mat = spcreator(D6)
with pytest.raises(ValueError, match="to an empty matrix"):
mat.argmin(axis=axis)
with pytest.raises(ValueError, match="to an empty matrix"):
mat.argmax(axis=axis)
@pytest.mark.parametrize("spcreator", formats_for_minmax)
|
Test_MinMaxMixin1D
|
python
|
getsentry__sentry
|
src/sentry/replays/usecases/ingest/__init__.py
|
{
"start": 3687,
"end": 3882
}
|
class ____(TypedDict):
context: EventContext
payload_compressed: bytes
payload: bytes
replay_event: dict[str, Any] | None
replay_video: bytes | None
@dataclasses.dataclass
|
Event
|
python
|
nmslib__hnswlib
|
examples/python/pyw_hnswlib.py
|
{
"start": 150,
"end": 1948
}
|
class ____():
def __init__(self, space, dim):
self.index = hnswlib.Index(space, dim)
self.lock = threading.Lock()
self.dict_labels = {}
self.cur_ind = 0
def init_index(self, max_elements, ef_construction=200, M=16):
self.index.init_index(max_elements=max_elements, ef_construction=ef_construction, M=M)
def add_items(self, data, ids=None):
if ids is not None:
assert len(data) == len(ids)
num_added = len(data)
with self.lock:
start = self.cur_ind
self.cur_ind += num_added
int_labels = []
if ids is not None:
for dl in ids:
int_labels.append(start)
self.dict_labels[start] = dl
start += 1
else:
for _ in range(len(data)):
int_labels.append(start)
self.dict_labels[start] = start
start += 1
self.index.add_items(data=data, ids=np.asarray(int_labels))
def set_ef(self, ef):
self.index.set_ef(ef)
def load_index(self, path):
self.index.load_index(path)
with open(path + ".pkl", "rb") as f:
self.cur_ind, self.dict_labels = pickle.load(f)
def save_index(self, path):
self.index.save_index(path)
with open(path + ".pkl", "wb") as f:
pickle.dump((self.cur_ind, self.dict_labels), f)
def set_num_threads(self, num_threads):
self.index.set_num_threads(num_threads)
def knn_query(self, data, k=1):
labels_int, distances = self.index.knn_query(data=data, k=k)
labels = []
for li in labels_int:
labels.append(
[self.dict_labels[l] for l in li]
)
return labels, distances
|
Index
|
python
|
scipy__scipy
|
scipy/signal/tests/test_filter_design.py
|
{
"start": 55165,
"end": 59888
}
|
class ____:
def test_ticket1441(self, xp):
"""Regression test for ticket 1441."""
# Because freqz previously used arange instead of linspace,
# when N was large, it would return one more point than
# requested.
N = 100000
w, h = freqz_zpk(xp.asarray([0.5]), xp.asarray([0.5]), 1.0, worN=N)
assert w.shape == (N,)
def test_basic(self, xp):
w, h = freqz_zpk(xp.asarray([0.5]), xp.asarray([0.5]), 1.0, worN=8)
assert_array_almost_equal(w, xp.pi * xp.arange(8.0) / 8)
assert_array_almost_equal(h, xp.ones(8))
def test_basic_whole(self, xp):
w, h = freqz_zpk(xp.asarray([0.5]), xp.asarray([0.5]), 1.0, worN=8, whole=True)
assert_array_almost_equal(w, 2 * xp.pi * xp.arange(8.0) / 8)
assert_array_almost_equal(h, xp.ones(8))
@pytest.mark.xfail(DEFAULT_F32, reason="wrong answer with torch/float32")
def test_vs_freqz(self, xp):
b, a = cheby1(4, 5, 0.5, analog=False, output='ba')
z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk')
w1, h1 = freqz(b, a)
z, p, k, w1, h1 = map(xp.asarray, (z, p, k, w1, h1))
w2, h2 = freqz_zpk(z, p, k)
xp_assert_close(w1, w2)
xp_assert_close(h1, h2, rtol=1.3e-6)
def test_backward_compat(self, xp):
# For backward compatibility, test if None act as a wrapper for default
w1, h1 = freqz_zpk(xp.asarray([0.5]), xp.asarray([0.5]), 1.0)
w2, h2 = freqz_zpk(xp.asarray([0.5]), xp.asarray([0.5]), 1.0, None)
assert_array_almost_equal(w1, w2)
assert_array_almost_equal(h1, h2)
def test_fs_param(self, xp):
fs = 900
z = xp.asarray([-1, -1, -1.0])
p = xp.asarray(
[0.4747869998473389 + 0.4752230717749344j,
0.37256600288916636,
0.4747869998473389 - 0.4752230717749344j]
)
k = 0.03934683014103762
# N = None, whole=False
w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs)
w2, h2 = freqz_zpk(z, p, k, whole=False)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs/2, 512, endpoint=False))
# N = None, whole=True
w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs)
w2, h2 = freqz_zpk(z, p, k, whole=True)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs, 512, endpoint=False))
# N = 5, whole=False
w1, h1 = freqz_zpk(z, p, k, 5, fs=fs)
w2, h2 = freqz_zpk(z, p, k, 5)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs/2, 5, endpoint=False))
# N = 5, whole=True
w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs)
w2, h2 = freqz_zpk(z, p, k, 5, whole=True)
xp_assert_close(h1, h2)
xp_assert_close(w1, xp.linspace(0, fs, 5, endpoint=False))
@skip_xp_backends(np_only=True, reason="array_likes")
def test_fs_param2(self, xp):
fs = 900
z = xp.asarray([-1, -1, -1.0])
p = xp.asarray(
[0.4747869998473389 + 0.4752230717749344j,
0.37256600288916636,
0.4747869998473389 - 0.4752230717749344j]
)
k = 0.03934683014103762
# w is an array_like
for w in ([123], (123,), xp.asarray([123]), (50, 123, 230),
xp.asarray([50, 123, 230])):
w1, h1 = freqz_zpk(z, p, k, w, fs=fs)
w2, h2 = freqz_zpk(z, p, k, 2*pi*xp.asarray(w)/fs)
xp_assert_close(h1, h2)
xp_assert_close(w, w1, check_dtype=False)
def test_w_or_N_types(self):
# Measure at 8 equally-spaced points
for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
np.array(8)):
w, h = freqz_zpk([], [], 1, worN=N)
assert_array_almost_equal(w, np.pi * np.arange(8) / 8.)
assert_array_almost_equal(h, np.ones(8))
w, h = freqz_zpk([], [], 1, worN=N, fs=100)
assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False))
assert_array_almost_equal(h, np.ones(8))
# Measure at frequency 8 Hz
for w in (8.0, 8.0+0j):
# Only makes sense when fs is specified
w_out, h = freqz_zpk([], [], 1, worN=w, fs=100)
assert_array_almost_equal(w_out, [8])
assert_array_almost_equal(h, [1])
def test_fs_validation(self):
with pytest.raises(ValueError, match="Sampling.*single scalar"):
freqz_zpk([1.0], [1.0], [1.0], fs=np.array([10., 20]))
with pytest.raises(ValueError, match="Sampling.*be none."):
freqz_zpk([1.0], [1.0], [1.0], fs=None)
@make_xp_test_case(normalize)
|
TestFreqz_zpk
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.