language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v3.0.0.a.py | {
"start": 1910,
"end": 2243
} | class ____(BaseModel):
__tablename__ = "trials"
trial_id = Column(Integer, primary_key=True)
number = Column(Integer)
study_id = Column(Integer, ForeignKey("studies.study_id"))
state = Column(Enum(TrialState), nullable=False)
datetime_start = Column(DateTime)
datetime_complete = Column(DateTime)
| TrialModel |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_job_manager.py | {
"start": 20069,
"end": 23252
} | class ____:
async def test_submit_basic_echo(self, job_manager):
job_id = await job_manager.submit_job(entrypoint="echo hello")
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert "hello\n" in job_manager.get_job_logs(job_id)
async def test_submit_stderr(self, job_manager):
job_id = await job_manager.submit_job(entrypoint="echo error 1>&2")
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert "error\n" in job_manager.get_job_logs(job_id)
async def test_submit_ls_grep(self, job_manager):
grep_cmd = f"ls {os.path.dirname(__file__)} | grep test_job_manager.py"
job_id = await job_manager.submit_job(entrypoint=grep_cmd)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert "test_job_manager.py\n" in job_manager.get_job_logs(job_id)
async def test_subprocess_exception(self, job_manager):
"""
Run a python script with exception, ensure:
1) Job status is marked as failed
2) Job manager can surface exception message back to logs api
3) Job no hanging job supervisor actor
4) Empty logs
"""
run_cmd = f"python {_driver_script_path('script_with_exception.py')}"
job_id = await job_manager.submit_job(entrypoint=run_cmd)
async def cleaned_up():
data = await job_manager.get_job_info(job_id)
if data.status != JobStatus.FAILED:
return False
if "Exception: Script failed with exception !" not in data.message:
return False
return job_manager._get_actor_for_job(job_id) is None
await async_wait_for_condition(cleaned_up)
async def test_submit_with_s3_runtime_env(self, job_manager):
job_id = await job_manager.submit_job(
entrypoint="python script.py",
runtime_env={"working_dir": "s3://runtime-env-test/script_runtime_env.zip"},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert "Executing main() from script.py !!\n" in job_manager.get_job_logs(
job_id
)
async def test_submit_with_file_runtime_env(self, job_manager):
with tempfile.NamedTemporaryFile(suffix=".zip") as f:
filename, _ = urllib.request.urlretrieve(
"https://runtime-env-test.s3.amazonaws.com/script_runtime_env.zip",
filename=f.name,
)
job_id = await job_manager.submit_job(
entrypoint="python script.py",
runtime_env={"working_dir": "file://" + filename},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert "Executing main() from script.py !!\n" in job_manager.get_job_logs(
job_id
)
@pytest.mark.asyncio
| TestShellScriptExecution |
python | spyder-ide__spyder | spyder/api/widgets/toolbars.py | {
"start": 3166,
"end": 10132
} | class ____(QToolBar):
"""
Spyder Toolbar.
This class provides toolbars with some predefined functionality.
"""
sig_is_rendered = Signal()
"""
This signal is emitted to let other objects know that the toolbar is now
rendered.
"""
def __init__(self, parent, title):
super().__init__(parent=parent)
# Attributes
self._title = title
self._section_items = OrderedDict()
self._item_map: Dict[str, ToolbarItem] = {}
self._pending_items: Dict[str, List[ToolbarItemEntry]] = {}
self._default_section = "default_section"
self._filter = None
self.setWindowTitle(title)
# Set attributes for extension button.
# From https://stackoverflow.com/a/55412455/438386
ext_button = self.findChild(QToolButton, "qt_toolbar_ext_button")
ext_button.setIcon(ima.icon('toolbar_ext_button'))
ext_button.setToolTip(_("More"))
# Set style for extension button menu (not all extension buttons have
# it).
if ext_button.menu():
ext_button.menu().setStyleSheet(
SpyderMenu._generate_stylesheet().toString()
)
ext_button_menu_style = SpyderMenuProxyStyle(None)
ext_button_menu_style.setParent(self)
ext_button.menu().setStyle(ext_button_menu_style)
def add_item(
self,
action_or_widget: ToolbarItem,
section: Optional[str] = None,
before: Optional[str] = None,
before_section: Optional[str] = None,
omit_id: bool = False
):
"""
Add action or widget item to given toolbar `section`.
Parameters
----------
item: SpyderAction or QWidget
The item to add to the `toolbar`.
toolbar_id: str or None
The application toolbar unique string identifier.
section: str or None
The section id in which to insert the `item` on the `toolbar`.
before: str or None
Make the item appear before another given item.
before_section: str or None
Make the item defined section appear before another given section
(must be already defined).
omit_id: bool
If True, then the toolbar will check if the item to add declares an
id, False otherwise. This flag exists only for items added on
Spyder 4 plugins. Default: False
"""
item_id = None
if (
isinstance(action_or_widget, SpyderAction)
or hasattr(action_or_widget, 'action_id')
):
item_id = action_or_widget.action_id
elif hasattr(action_or_widget, 'ID'):
item_id = action_or_widget.ID
if not omit_id and item_id is None and action_or_widget is not None:
raise SpyderAPIError(
f'Item {action_or_widget} must declare an ID attribute.'
)
if before is not None:
if before not in self._item_map:
before_pending_items = self._pending_items.get(before, [])
before_pending_items.append(
(action_or_widget, section, before, before_section))
self._pending_items[before] = before_pending_items
return
else:
before = self._item_map[before]
if section is None:
section = self._default_section
action_or_widget._section = section
if before is not None:
if section == self._default_section:
action_or_widget._section = before._section
section = before._section
if section not in self._section_items:
self._section_items[section] = [action_or_widget]
else:
if before is not None:
new_actions_or_widgets = []
for act_or_wid in self._section_items[section]:
if act_or_wid == before:
new_actions_or_widgets.append(action_or_widget)
new_actions_or_widgets.append(act_or_wid)
self._section_items[section] = new_actions_or_widgets
else:
self._section_items[section].append(action_or_widget)
if (before_section is not None and
before_section in self._section_items):
new_sections_keys = []
for sec in self._section_items.keys():
if sec == before_section:
new_sections_keys.append(section)
if sec != section:
new_sections_keys.append(sec)
self._section_items = OrderedDict(
(section_key, self._section_items[section_key])
for section_key in new_sections_keys)
if item_id is not None:
self._item_map[item_id] = action_or_widget
if item_id in self._pending_items:
item_pending = self._pending_items.pop(item_id)
for item, section, before, before_section in item_pending:
self.add_item(item, section=section, before=before,
before_section=before_section)
def remove_item(self, item_id: str):
"""Remove action or widget from toolbar by id."""
try:
item = self._item_map.pop(item_id)
for section in list(self._section_items.keys()):
section_items = self._section_items[section]
if item in section_items:
section_items.remove(item)
if len(section_items) == 0:
self._section_items.pop(section)
self.clear()
self.render()
except KeyError:
pass
def render(self):
"""Create the toolbar taking into account sections and locations."""
sec_items = []
for sec, items in self._section_items.items():
for item in items:
sec_items.append([sec, item])
sep = QAction(self)
sep.setSeparator(True)
sec_items.append((None, sep))
if sec_items:
sec_items.pop()
for (sec, item) in sec_items:
if isinstance(item, QAction):
add_method = super().addAction
else:
add_method = super().addWidget
add_method(item)
if isinstance(item, QAction):
widget = self.widgetForAction(item)
if self._filter is not None:
widget.installEventFilter(self._filter)
text_beside_icon = getattr(item, 'text_beside_icon', False)
if text_beside_icon:
widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
if item.isCheckable():
widget.setCheckable(True)
self.sig_is_rendered.emit()
| SpyderToolbar |
python | great-expectations__great_expectations | great_expectations/metrics/column/mean.py | {
"start": 179,
"end": 292
} | class ____(ColumnMetric[ColumnMeanResult]):
"""Mean of values in a column"""
name = "column.mean"
| ColumnMean |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_workflow_index.py | {
"start": 37529,
"end": 45985
} | class ____(OrganizationWorkflowAPITestCase):
method = "DELETE"
def assert_unaffected_workflows(self, workflows: Sequence[Workflow]) -> None:
for workflow in workflows:
workflow.refresh_from_db()
assert Workflow.objects.get(id=workflow.id).status != ObjectStatus.PENDING_DELETION
def setUp(self) -> None:
super().setUp()
self.workflow = self.create_workflow(
organization_id=self.organization.id, name="Test Workflow"
)
self.workflow_two = self.create_workflow(
organization_id=self.organization.id, name="Another Workflow"
)
self.workflow_three = self.create_workflow(
organization_id=self.organization.id, name="Third Workflow"
)
def test_delete_workflows_by_ids_success(self) -> None:
"""Test successful deletion of workflows by specific IDs"""
with outbox_runner():
self.get_success_response(
self.organization.slug,
qs_params=[("id", str(self.workflow.id)), ("id", str(self.workflow_two.id))],
status_code=204,
)
# Ensure the workflows are scheduled for deletion
self.workflow.refresh_from_db()
self.workflow_two.refresh_from_db()
assert self.workflow.status == ObjectStatus.PENDING_DELETION
assert self.workflow_two.status == ObjectStatus.PENDING_DELETION
assert RegionScheduledDeletion.objects.filter(
model_name="Workflow",
object_id=self.workflow.id,
).exists()
assert RegionScheduledDeletion.objects.filter(
model_name="Workflow",
object_id=self.workflow_two.id,
).exists()
# Delete the workflows
with self.tasks():
run_scheduled_deletions()
# Ensure workflows are removed
assert not Workflow.objects.filter(id=self.workflow.id).exists()
assert not Workflow.objects.filter(id=self.workflow_two.id).exists()
# Verify third workflow is unaffected
self.assert_unaffected_workflows([self.workflow_three])
def test_delete_workflows_by_query_success(self) -> None:
with outbox_runner():
self.get_success_response(
self.organization.slug,
qs_params={"query": "test"},
status_code=204,
)
# Ensure the workflow is scheduled for deletion
self.workflow.refresh_from_db()
assert self.workflow.status == ObjectStatus.PENDING_DELETION
assert RegionScheduledDeletion.objects.filter(
model_name="Workflow",
object_id=self.workflow.id,
).exists()
# Delete the workflows
with self.tasks():
run_scheduled_deletions()
# Ensure workflow is removed
assert not Workflow.objects.filter(id=self.workflow.id).exists()
# Other workflows should be unaffected
self.assert_unaffected_workflows([self.workflow_two, self.workflow_three])
def test_delete_workflows_by_project_success(self) -> None:
# Create detectors and link workflows to projects
detector_1 = self.create_detector(project=self.project)
detector_2 = self.create_detector(project=self.project)
other_project = self.create_project(organization=self.organization)
detector_3 = self.create_detector(project=other_project)
self.create_detector_workflow(workflow=self.workflow, detector=detector_1)
self.create_detector_workflow(workflow=self.workflow_two, detector=detector_2)
self.create_detector_workflow(workflow=self.workflow_three, detector=detector_3)
with outbox_runner():
self.get_success_response(
self.organization.slug,
qs_params={"project": str(self.project.id)},
status_code=204,
)
# Ensure the workflows are scheduled for deletion
self.workflow.refresh_from_db()
self.workflow_two.refresh_from_db()
assert self.workflow.status == ObjectStatus.PENDING_DELETION
assert self.workflow_two.status == ObjectStatus.PENDING_DELETION
assert RegionScheduledDeletion.objects.filter(
model_name="Workflow",
object_id=self.workflow.id,
).exists()
assert RegionScheduledDeletion.objects.filter(
model_name="Workflow",
object_id=self.workflow_two.id,
).exists()
# Delete the workflows
with self.tasks():
run_scheduled_deletions()
# Ensure workflows are removed
assert not Workflow.objects.filter(id=self.workflow.id).exists()
assert not Workflow.objects.filter(id=self.workflow_two.id).exists()
# Workflow linked to other project should be unaffected
self.assert_unaffected_workflows([self.workflow_three])
def test_delete_workflows_no_parameters_error(self) -> None:
response = self.get_error_response(
self.organization.slug,
status_code=400,
)
assert "At least one of 'id', 'query', 'project', or 'projectSlug' must be provided" in str(
response.data["detail"]
)
# Verify no workflows were affected
self.assert_unaffected_workflows([self.workflow, self.workflow_two, self.workflow_three])
def test_delete_no_matching_workflows(self) -> None:
# Test deleting workflows with non-existent ID
response = self.get_success_response(
self.organization.slug,
qs_params={"id": "999999"},
status_code=200,
)
assert "No workflows found" in str(response.data["detail"])
# Verify no workflows were affected
self.assert_unaffected_workflows([self.workflow, self.workflow_two, self.workflow_three])
# Test deleting workflows with non-matching query
self.get_success_response(
self.organization.slug,
qs_params={"query": "nonexistent-workflow-name"},
status_code=200,
)
assert "No workflows found" in str(response.data["detail"])
# Verify no workflows were affected
self.assert_unaffected_workflows([self.workflow, self.workflow_two, self.workflow_three])
def test_delete_workflows_invalid_id_format(self) -> None:
response = self.get_error_response(
self.organization.slug,
qs_params={"id": "not-a-number"},
status_code=400,
)
assert "Invalid ID format" in str(response.data["id"])
def test_delete_workflows_filtering_ignored_with_ids(self) -> None:
# Link workflow to project via detector
detector = self.create_detector(project=self.project)
self.create_detector_workflow(workflow=self.workflow, detector=detector)
# Other filters should be ignored when specific IDs are provided
with outbox_runner():
self.get_success_response(
self.organization.slug,
qs_params={
"id": str(self.workflow_two.id),
"project": str(self.project.id),
},
status_code=204,
)
# Ensure the workflow is scheduled for deletion
self.workflow_two.refresh_from_db()
assert self.workflow_two.status == ObjectStatus.PENDING_DELETION
assert RegionScheduledDeletion.objects.filter(
model_name="Workflow",
object_id=self.workflow_two.id,
).exists()
# Delete the workflows
with self.tasks():
run_scheduled_deletions()
# Ensure workflow is removed
assert not Workflow.objects.filter(id=self.workflow_two.id).exists()
# Other workflows should be unaffected
self.assert_unaffected_workflows([self.workflow, self.workflow_three])
def test_delete_workflows_audit_entry(self) -> None:
with outbox_runner():
self.get_success_response(
self.organization.slug,
qs_params={"id": str(self.workflow.id)},
status_code=204,
)
assert_org_audit_log_exists(
organization=self.organization,
event=audit_log.get_event_id("WORKFLOW_REMOVE"),
target_object=self.workflow.id,
actor=self.user,
)
| OrganizationWorkflowDeleteTest |
python | aio-libs__aiohttp | tests/test_web_exceptions.py | {
"start": 5184,
"end": 6846
} | class ____:
def test_ctor_all(self) -> None:
resp = web.HTTPOk(
headers={"X-Custom": "value"},
reason="Done",
text="text",
content_type="custom",
)
assert resp.text == "text"
compare: Mapping[str, str] = {"X-Custom": "value", "Content-Type": "custom"}
assert resp.headers == compare
assert resp.reason == "Done"
assert resp.status == 200
def test_multiline_reason(self) -> None:
with pytest.raises(ValueError, match=r"Reason cannot contain \\n"):
web.HTTPOk(reason="Bad\r\nInjected-header: foo")
def test_pickle(self) -> None:
resp = web.HTTPOk(
headers={"X-Custom": "value"},
reason="Done",
text="text",
content_type="custom",
)
resp.foo = "bar" # type: ignore[attr-defined]
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(resp, proto)
resp2 = pickle.loads(pickled)
assert resp2.text == "text"
assert resp2.headers == resp.headers
assert resp2.reason == "Done"
assert resp2.status == 200
assert resp2.foo == "bar"
async def test_app(self, aiohttp_client: AiohttpClient) -> None:
async def handler(request: web.Request) -> NoReturn:
raise web.HTTPOk()
app = web.Application()
app.router.add_get("/", handler)
cli = await aiohttp_client(app)
resp = await cli.get("/")
assert 200 == resp.status
txt = await resp.text()
assert "200: OK" == txt
| TestHTTPOk |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI059.py | {
"start": 1147,
"end": 1216
} | class ____(Generic[T], Generic[K, V]): ... # PYI059
# Negative cases
| C |
python | scrapy__scrapy | tests/test_utils_asyncio.py | {
"start": 730,
"end": 3379
} | class ____:
"""Test for scrapy.utils.asyncio.parallel_asyncio(), based on tests.test_utils_defer.TestParallelAsync."""
CONCURRENT_ITEMS = 50
@staticmethod
async def callable(o: int, results: list[int]) -> None:
if random.random() < 0.4:
# simulate async processing
await asyncio.sleep(random.random() / 8)
# simulate trivial sync processing
results.append(o)
async def callable_wrapped(
self,
o: int,
results: list[int],
parallel_count: list[int],
max_parallel_count: list[int],
) -> None:
parallel_count[0] += 1
max_parallel_count[0] = max(max_parallel_count[0], parallel_count[0])
await self.callable(o, results)
assert parallel_count[0] > 0, parallel_count[0]
parallel_count[0] -= 1
@staticmethod
def get_async_iterable(length: int) -> AsyncGenerator[int, None]:
# simulate a simple callback without delays between results
return as_async_generator(range(length))
@staticmethod
async def get_async_iterable_with_delays(length: int) -> AsyncGenerator[int, None]:
# simulate a callback with delays between some of the results
for i in range(length):
if random.random() < 0.1:
await asyncio.sleep(random.random() / 20)
yield i
@deferred_f_from_coro_f
async def test_simple(self):
for length in [20, 50, 100]:
parallel_count = [0]
max_parallel_count = [0]
results = []
ait = self.get_async_iterable(length)
await _parallel_asyncio(
ait,
self.CONCURRENT_ITEMS,
self.callable_wrapped,
results,
parallel_count,
max_parallel_count,
)
assert list(range(length)) == sorted(results)
assert max_parallel_count[0] <= self.CONCURRENT_ITEMS
@deferred_f_from_coro_f
async def test_delays(self):
for length in [20, 50, 100]:
parallel_count = [0]
max_parallel_count = [0]
results = []
ait = self.get_async_iterable_with_delays(length)
await _parallel_asyncio(
ait,
self.CONCURRENT_ITEMS,
self.callable_wrapped,
results,
parallel_count,
max_parallel_count,
)
assert list(range(length)) == sorted(results)
assert max_parallel_count[0] <= self.CONCURRENT_ITEMS
@pytest.mark.only_asyncio
| TestParallelAsyncio |
python | redis__redis-py | redis/commands/search/index_definition.py | {
"start": 131,
"end": 2489
} | class ____:
"""IndexDefinition is used to define a index definition for automatic
indexing on Hash or Json update."""
def __init__(
self,
prefix=[],
filter=None,
language_field=None,
language=None,
score_field=None,
score=1.0,
payload_field=None,
index_type=None,
):
self.args = []
self._append_index_type(index_type)
self._append_prefix(prefix)
self._append_filter(filter)
self._append_language(language_field, language)
self._append_score(score_field, score)
self._append_payload(payload_field)
def _append_index_type(self, index_type):
"""Append `ON HASH` or `ON JSON` according to the enum."""
if index_type is IndexType.HASH:
self.args.extend(["ON", "HASH"])
elif index_type is IndexType.JSON:
self.args.extend(["ON", "JSON"])
elif index_type is not None:
raise RuntimeError(f"index_type must be one of {list(IndexType)}")
def _append_prefix(self, prefix):
"""Append PREFIX."""
if len(prefix) > 0:
self.args.append("PREFIX")
self.args.append(len(prefix))
for p in prefix:
self.args.append(p)
def _append_filter(self, filter):
"""Append FILTER."""
if filter is not None:
self.args.append("FILTER")
self.args.append(filter)
def _append_language(self, language_field, language):
"""Append LANGUAGE_FIELD and LANGUAGE."""
if language_field is not None:
self.args.append("LANGUAGE_FIELD")
self.args.append(language_field)
if language is not None:
self.args.append("LANGUAGE")
self.args.append(language)
def _append_score(self, score_field, score):
"""Append SCORE_FIELD and SCORE."""
if score_field is not None:
self.args.append("SCORE_FIELD")
self.args.append(score_field)
if score is not None:
self.args.append("SCORE")
self.args.append(score)
def _append_payload(self, payload_field):
"""Append PAYLOAD_FIELD."""
if payload_field is not None:
self.args.append("PAYLOAD_FIELD")
self.args.append(payload_field)
| IndexDefinition |
python | tensorflow__tensorflow | tensorflow/python/saved_model/save_test.py | {
"start": 53271,
"end": 57796
} | class ____(test.TestCase):
def setUp(self):
super(AssetTests, self).setUp()
self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
with open(self._vocab_path, "w") as f:
f.write("alpha\nbeta\ngamma\n")
def test_asset_path_returned(self):
root = autotrackable.AutoTrackable()
root.path = asset.Asset(self._vocab_path)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
root.get_asset = def_function.function(lambda: root.path.asset_path)
save.save(root, save_dir, signatures=root.get_asset.get_concrete_function())
second_dir = os.path.join(self.get_temp_dir(), "second_dir")
file_io.rename(save_dir, second_dir)
imported_path = _import_and_infer(second_dir, {})["output_0"]
self.assertIn(
compat.as_str_any(second_dir), compat.as_str_any(imported_path))
def test_table(self):
initializer = lookup_ops.TextFileInitializer(
self._vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
root = checkpoint.Checkpoint(
table=lookup_ops.HashTable(initializer, default_value=-1))
root.table_user = def_function.function(
root.table.lookup,
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
self.assertEqual(
2, self.evaluate(root.table_user(constant_op.constant("gamma"))))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
file_io.delete_file(self._vocab_path)
self.assertAllClose({"output_0": [2, 0]},
_import_and_infer(save_dir,
{"keys": ["gamma", "alpha"]}))
second_dir = os.path.join(self.get_temp_dir(), "second_dir")
# Asset paths should track the location the SavedModel is loaded from.
file_io.rename(save_dir, second_dir)
self.assertAllClose({"output_0": [2, 1]},
_import_and_infer(second_dir,
{"keys": ["gamma", "beta"]}))
def test_untracked_table_useful_message(self):
root = module.Module()
initializer = lookup_ops.TextFileInitializer(
self._vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
table = lookup_ops.HashTable(initializer, default_value=-1)
root.table_user = def_function.function(
table.lookup,
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
root.table_user(constant_op.constant("gamma"))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(AssertionError, "HashTable"):
save.save(root, save_dir)
def test_unused_asset(self):
root = autotrackable.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.asset = asset.Asset(self._vocab_path)
export_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, export_dir)
self.assertAllClose({"output_0": [0.2]},
_import_and_infer(export_dir, {"x": [0.1]}))
def test_sensible_function_building_exception(self):
root = checkpoint.Checkpoint(v=variables.Variable(2.))
root.f = def_function.function(
lambda x: 2. * root.v,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
export_dir = os.path.join(self.get_temp_dir(), "save_dir")
@def_function.function
def _calls_save():
save.save(root, export_dir)
with self.assertRaisesRegex(AssertionError, "tf.function"):
_calls_save()
def test_rewrite_asset_to_same_destination(self):
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
asset_path = os.path.join(self.get_temp_dir(), "asset")
def save_and_load(label):
with open(asset_path, "w") as f:
f.write(label)
model = autotrackable.AutoTrackable()
model.asset = asset.Asset(asset_path)
model.fn = def_function.function(lambda: io_ops.read_file(model.asset))
self.assertEqual(label, model.fn().numpy().decode("utf-8"))
save.save(model, save_dir)
imported = load.load(save_dir)
self.assertEqual(label, imported.fn().numpy().decode("utf-8"))
save_and_load("first")
save_and_load("second")
| AssetTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor19.py | {
"start": 161,
"end": 252
} | class ____:
pass
a1 = A()
# This should generate an error
a2 = A(1)
a3 = A(*[], **{})
| A |
python | apache__airflow | airflow-core/src/airflow/ti_deps/deps/task_not_running_dep.py | {
"start": 1019,
"end": 1799
} | class ____(BaseTIDep):
"""Ensures that the task instance's state is not running."""
NAME = "Task Instance Not Running"
IGNORABLE = False
def __eq__(self, other: object) -> bool:
"""Check if two task instance dependencies are of the same type."""
return type(self) is type(other)
def __hash__(self):
"""Compute the hash value based on the type of the task instance dependency."""
return hash(type(self))
@provide_session
def _get_dep_statuses(self, ti, session, dep_context=None):
if ti.state != TaskInstanceState.RUNNING:
yield self._passing_status(reason="Task is not in running state.")
return
yield self._failing_status(reason="Task is in the running state")
| TaskNotRunningDep |
python | huggingface__transformers | src/transformers/integrations/mxfp4.py | {
"start": 4824,
"end": 9492
} | class ____(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys: Optional[list[str]] = None,
**kwargs,
) -> dict[str, torch.Tensor]:
param_data = {}
if "_blocks" in input_dict.keys():
if isinstance(input_dict["_blocks"], list):
param_data["_blocks"] = input_dict["_blocks"][0]
else:
param_data["_blocks"] = input_dict["_blocks"]
if "_scales" in input_dict.keys():
if isinstance(input_dict["_scales"], list):
param_data["_scales"] = input_dict["_scales"][0]
else:
param_data["_scales"] = input_dict["_scales"]
# Eagerly set tensors on the module and perform swizzle
module, _ = get_module_from_name(model, full_layer_name)
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
swizzle_mxfp4_convertops(
param_data["_blocks"],
param_data["_scales"],
module,
proj,
param_data["_blocks"].device,
triton_kernels_hub,
)
missing_keys.discard(f"{full_layer_name}")
module._is_hf_initialized = True
# We return an empty mapping since the module was updated in-place. This prevents
# the loader from trying to materialize the original meta-parameter names again.
# We don't use set_param_for_module since it expects mainly a torch.nn.Parameter or a safetensors pointer
return {}
# Copied from GPT_OSS repo and vllm
def quantize_to_mxfp4(w, triton_kernels_hub):
downcast_to_mxfp_torch = triton_kernels_hub.numerics_details.mxfp.downcast_to_mxfp_torch
w, w_scale = downcast_to_mxfp_torch(w.to(torch.bfloat16), torch.uint8, axis=1)
return w, w_scale
def swizzle_mxfp4(w, w_scale, triton_kernels_hub):
"""
Changes the layout of the tensors depending on the hardware
"""
FP4, convert_layout, wrap_torch_tensor = (
triton_kernels_hub.tensor.FP4,
triton_kernels_hub.tensor.convert_layout,
triton_kernels_hub.tensor.wrap_torch_tensor,
)
layout = triton_kernels_hub.tensor_details.layout
StridedLayout = triton_kernels_hub.tensor_details.layout.StridedLayout
value_layout, value_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1)
w = convert_layout(wrap_torch_tensor(w, dtype=FP4), value_layout, **value_layout_opts)
w_scale = convert_layout(wrap_torch_tensor(w_scale), StridedLayout)
return w, w_scale
# Copied from GPT_OSS repo
# TODO: Add absolute link when the repo is public
def convert_moe_packed_tensors(
blocks,
scales,
*,
dtype: torch.dtype = torch.bfloat16,
rows_per_chunk: int = 32768 * 1024, # TODO these values are not here by mistake ;)
) -> torch.Tensor:
"""
Convert the mxfp4 weights again, dequantizing and makes them compatible with the forward
pass of GPT_OSS.
"""
import math
blocks = blocks.to(torch.uint8)
# Check if blocks and scales are on CPU, and move to GPU if so
if not blocks.is_cuda and torch.cuda.is_available():
blocks = blocks.cuda()
scales = scales.cuda()
elif (blocks.device.type != "xpu") and is_torch_xpu_available():
blocks = blocks.to("xpu")
scales = scales.to("xpu")
scales = scales.to(torch.int32) - 127 # TODO that's because 128=2**7
assert blocks.shape[:-1] == scales.shape, f"{blocks.shape[:-1]=} does not match {scales.shape=}"
lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device)
*prefix_shape, G, B = blocks.shape
rows_total = math.prod(prefix_shape) * G
blocks = blocks.reshape(rows_total, B)
scales = scales.reshape(rows_total, 1)
out = torch.empty(rows_total, B * 2, dtype=dtype, device=blocks.device)
for r0 in range(0, rows_total, rows_per_chunk):
r1 = min(r0 + rows_per_chunk, rows_total)
blk = blocks[r0:r1]
exp = scales[r0:r1]
# nibble indices -> int64
idx_lo = (blk & 0x0F).to(torch.long)
idx_hi = (blk >> 4).to(torch.long)
sub = out[r0:r1]
sub[:, 0::2] = lut[idx_lo]
sub[:, 1::2] = lut[idx_hi]
torch.ldexp(sub, exp, out=sub)
del idx_lo, idx_hi, blk, exp, sub
out = out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2)
del blocks, scales, lut
return out.transpose(1, 2).contiguous()
| Mxfp4Deserialize |
python | allegroai__clearml | clearml/utilities/deferred.py | {
"start": 123,
"end": 1177
} | class ____(object):
@attr.s
class _DeferredAction(object):
method = attr.ib()
args = attr.ib()
kwargs = attr.ib()
def __init__(self, instance: Any) -> None:
self._instance = instance
self._pool = []
self._lock = threading.Lock()
def add(self, callable_: Callable, *args: Any, **kwargs: Any) -> None:
self._pool.append(self._DeferredAction(callable_, args, kwargs))
def clear(self) -> List["DeferredExecutionPool._DeferredAction"]:
with self._lock:
pool = self._pool
self._pool = []
return pool
def apply(self) -> None:
pool = self.clear()
for action in pool:
action.method(self._instance, *action.args, **action.kwargs)
def copy_from(self, other: "DeferredExecutionPool") -> None:
if not isinstance(self._instance, type(other._instance)):
raise ValueError("Copy deferred actions must be with the same instance type")
self._pool = other._pool[:]
| DeferredExecutionPool |
python | google__jax | tests/state_test.py | {
"start": 2072,
"end": 24661
} | class ____(jtu.JaxTestCase):
def test_get_abstract_aval_must_take_in_refs(self):
ref_aval = core.ShapedArray((), jnp.float32)
def f(x_ref):
return [ref_get(x_ref, ())]
with self.assertRaises(ValueError):
pe.trace_to_jaxpr_dynamic(wrap_init(f, 1), [ref_aval])
@parameterized.named_parameters(
dict(testcase_name="trivial_get", ref_shape=(1, 2),
ref_dtype=jnp.float32,
idx=(), out_shape=(1, 2), out_dtype=jnp.float32),
dict(testcase_name="get_with_index", ref_shape=(1, 2),
ref_dtype=jnp.float32,
idx=(0,), out_shape=(2,), out_dtype=jnp.float32),
dict(testcase_name="get_with_nonleading_index", ref_shape=(1, 2),
ref_dtype=jnp.float32,
idx=(slice(None), 0), out_shape=(1,), out_dtype=jnp.float32),
dict(testcase_name="get_with_array_index", ref_shape=(1, 2, 3, 4),
ref_dtype=jnp.float32,
idx=(np.array([0, 1]),), out_shape=(2, 2, 3, 4),
out_dtype=jnp.float32),
dict(testcase_name="get_with_multiple_array_index",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(np.array([0, 1]), np.array([0, 1])),
out_shape=(2, 2, 4), out_dtype=jnp.float32),
dict(testcase_name="get_with_nonleading_multiple_array_index",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(None), np.array([0, 1]), slice(None), np.array([0, 1])),
out_shape=(2, 1, 2), out_dtype=jnp.float32),
dict(testcase_name="get_with_nontrivial_slice",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(0, 1), np.array([0, 1]), slice(None), np.array([0, 1])),
out_shape=(2, 1, 2), out_dtype=jnp.float32),
dict(testcase_name="get_with_nontrivial_slice2",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(0, 1), slice(1, 3), slice(None), slice(None)),
out_shape=(1, 2, 2, 4), out_dtype=jnp.float32),
dict(testcase_name="get_with_ref_simple_at",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(1, 3), slice(None), slice(None)),
out_shape=(2, 2, 4), out_dtype=jnp.float32,
at_indices=((0,),)),
dict(testcase_name="get_with_ref_simple_at2",
ref_shape=(6, 1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(0, 2), slice(0, 1), slice(1, 3), slice(None), slice(None)),
out_shape=(2, 1, 2, 2, 4), out_dtype=jnp.float32,
at_indices=((slice(2, 6),),)),
dict(testcase_name="get_with_ref_multiple_at",
ref_shape=(1, 3, 5, 4), ref_dtype=jnp.float32,
idx=(slice(None), slice(None), slice(0, 2)),
out_shape=(3, 1, 2), out_dtype=jnp.float32,
at_indices=((0,), (slice(None), slice(0, 1)))),
)
def test_get_abstract_eval(self, ref_shape, ref_dtype, idx, out_shape=None,
out_dtype=None, at_indices=(),
should_error=False):
ref_aval = AbstractRef(core.ShapedArray(ref_shape, ref_dtype))
def f(x_ref):
for at_idx in at_indices:
x_ref = x_ref.at[at_idx]
out = ref_get(x_ref, idx)
return [out]
if should_error:
with self.assertRaises(Exception):
pe.trace_to_jaxpr_dynamic(wrap_init(f, 1), [ref_aval])
else:
jaxpr, out_avals, _ = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 1), [ref_aval])
self.assertSetEqual(jaxpr.effects,
{ReadEffect(len(jaxpr.constvars))})
self.assertLen(out_avals, 1)
out_aval, = out_avals
self.assertIsInstance(out_aval, core.ShapedArray)
self.assertEqual(out_aval.shape, out_shape)
self.assertEqual(out_aval.dtype, out_dtype)
@parameterized.parameters(
((4, 5), 0, (0,)),
((4, 5), 1, (0,)),
((9, 10, 11, 12), 0, (slice(None), 0, 1)), # Contiguous int indexing
((9, 10, 11, 12), 0, (0, slice(None), 1)), # Non-contiguous int indexing
((9, 10, 11, 12), 1, (slice(None), 0, 1)), # Contiguous after batch
((9, 10, 11, 12), 2, (slice(None), 0, 1)), # Non-contiguous after batch
((9, 10, 11, 12), 3, (slice(None), slice(None), 0)),
# Shaped int indexer, contiguous after batch
((9, 10, 11, 12), 3,
(slice(None), slice(None), np.array([[0,1]]))),
# Shaped int indexer, non-contiguous after batch
((9, 10, 11, 12), 2,
(np.array([[0, 1]]), slice(None), np.array([[0, 1]]))),
)
def test_vmap_of_get_regression(self, shape, in_axes, indexer):
# Regression test for https://github.com/jax-ml/jax/issues/33309
def f(x):
return x[indexer]
x = jnp.ones(shape)
result = jax.vmap(f, in_axes=in_axes)(jax.new_ref(x))
expected = jax.vmap(f, in_axes=in_axes)(x)
self.assertArraysEqual(result, expected)
def test_swap_abstract_eval_must_take_in_refs(self):
ref_aval = core.ShapedArray((), jnp.float32)
val_aval = core.ShapedArray((), jnp.float32)
def f(x_ref, val):
return [ref_swap(x_ref, (), val)]
with self.assertRaises(ValueError):
pe.trace_to_jaxpr_dynamic(wrap_init(f, 2), [ref_aval, val_aval])
@parameterized.named_parameters(
dict(testcase_name="invalid_val_shape", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(), should_error=True),
dict(testcase_name="invalid_val_shape_slice", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(slice(None),), should_error=True),
dict(testcase_name="trivial_swap", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(1, 2), val_dtype=jnp.float32,
idx=(), out_shape=(1, 2), out_dtype=jnp.float32),
dict(testcase_name="bad_dtype", ref_shape=(1, 2),
ref_dtype=jnp.int32, val_shape=(1, 2), val_dtype=jnp.float32,
idx=(), should_error=True),
dict(testcase_name="swap_with_index", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(0,), out_shape=(2,), out_dtype=jnp.float32),
dict(testcase_name="swap_with_nonleading_index", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(1,), val_dtype=jnp.float32,
idx=(slice(None), 0), out_shape=(1,), out_dtype=jnp.float32),
dict(testcase_name="swap_with_nonleading_index_bad_val", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(slice(None), 0), should_error=True),
dict(testcase_name="swap_with_array_index", ref_shape=(1, 2, 3, 4),
ref_dtype=jnp.float32, val_shape=(2, 2, 3, 4), val_dtype=jnp.float32,
idx=(np.array([0, 1]),), out_shape=(2, 2, 3, 4),
out_dtype=jnp.float32),
dict(testcase_name="swap_with_multiple_array_index",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 2, 4), val_dtype=jnp.float32,
idx=(np.array([0, 1]), np.array([0, 1])),
out_shape=(2, 2, 4), out_dtype=jnp.float32),
dict(testcase_name="swap_with_nonleading_multiple_array_index",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 1, 2), val_dtype=jnp.float32,
idx=(slice(None), np.array([0, 1]), slice(None), np.array([0, 1])),
out_shape=(2, 1, 2), out_dtype=jnp.float32),
dict(testcase_name="swap_with_nontrivial_slice",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(0, 1), np.array([0, 1]), slice(None), np.array([0, 1])),
val_shape=(2, 1, 2), val_dtype=jnp.float32,
out_shape=(2, 1, 2), out_dtype=jnp.float32),
dict(testcase_name="swap_with_nontrivial_slice2",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(0, 1), slice(1, 3), slice(None), slice(None)),
val_shape=(1, 2, 2, 4), val_dtype=jnp.float32,
out_shape=(1, 2, 2, 4), out_dtype=jnp.float32),
dict(testcase_name="swap_with_ref_simple_at",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(0, 1), slice(1, 3), slice(None),),
val_shape=(1, 1, 4), val_dtype=jnp.float32,
out_shape=(1, 1, 4), out_dtype=jnp.float32,
at_indices=((0,),),),
dict(testcase_name="swap_with_ref_simple_at2",
ref_shape=(4, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(None), slice(0, 1), slice(1, 3), slice(None),),
val_shape=(2, 1, 1, 4), val_dtype=jnp.float32,
out_shape=(2, 1, 1, 4), out_dtype=jnp.float32,
at_indices=((slice(0, 2),),),),
dict(testcase_name="swap_with_ref_multiple_at2",
ref_shape=(1, 4, 3, 2, 4), ref_dtype=jnp.float32,
idx=(slice(None), slice(0, 1), slice(1, 3), slice(None),),
val_shape=(2, 1, 1, 4), val_dtype=jnp.float32,
out_shape=(2, 1, 1, 4), out_dtype=jnp.float32,
at_indices=((slice(None), slice(0, 2),), (0,)),),
)
def test_swap_abstract_eval(self, ref_shape, ref_dtype,
val_shape, val_dtype, idx, out_shape=None, out_dtype=None,
at_indices=(), should_error=False):
ref_aval = AbstractRef(core.ShapedArray(ref_shape, ref_dtype))
val_aval = core.ShapedArray(val_shape, val_dtype)
def f(x_ref, val):
for at_idx in at_indices:
x_ref = x_ref.at[at_idx]
out = ref_swap(x_ref, idx, val)
return [out]
if should_error:
with self.assertRaises(Exception):
pe.trace_to_jaxpr_dynamic(wrap_init(f, 2), [ref_aval, val_aval])
else:
jaxpr, out_avals, _ = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 2), [ref_aval, val_aval])
self.assertSetEqual(jaxpr.effects,
{WriteEffect(len(jaxpr.constvars))})
self.assertLen(out_avals, 1)
out_aval, = out_avals
self.assertIsInstance(out_aval, core.ShapedArray)
self.assertEqual(out_aval.shape, out_shape)
self.assertEqual(out_aval.dtype, out_dtype)
@parameterized.named_parameters(
dict(testcase_name="invalid_val_shape", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(), should_error=True),
dict(testcase_name="invalid_val_shape_slice", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(slice(None),), should_error=True),
dict(testcase_name="trivial_addupdate", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(1, 2), val_dtype=jnp.float32,
idx=(),),
dict(testcase_name="bad_dtype", ref_shape=(1, 2),
ref_dtype=jnp.int32, val_shape=(1, 2), val_dtype=jnp.float32,
idx=(), should_error=True),
dict(testcase_name="addupdate_with_index", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(0,),),
dict(testcase_name="addupdate_with_nonleading_index", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(1,), val_dtype=jnp.float32,
idx=(slice(None), 0)),
dict(testcase_name="addupdate_with_nonleading_index_bad_val", ref_shape=(1, 2),
ref_dtype=jnp.float32, val_shape=(2,), val_dtype=jnp.float32,
idx=(slice(None), 0), should_error=True),
dict(testcase_name="addupdate_with_array_index", ref_shape=(1, 2, 3, 4),
ref_dtype=jnp.float32, val_shape=(2, 2, 3, 4), val_dtype=jnp.float32,
idx=(np.array([0, 1]),)),
dict(testcase_name="addupdate_with_multiple_array_index",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 2, 4), val_dtype=jnp.float32,
idx=(np.array([0, 1]), np.array([0, 1]))),
dict(testcase_name="addupdate_with_nonleading_multiple_array_index",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 1, 2), val_dtype=jnp.float32,
idx=(slice(None), np.array([0, 1]), slice(None), np.array([0, 1]))),
dict(testcase_name="ref_with_simple_at",
ref_shape=(1, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 2), val_dtype=jnp.float32,
idx=(np.array([0, 1]), slice(None), np.array([0, 1])),
at_indices=((0,),)),
dict(testcase_name="ref_with_simple_at2",
ref_shape=(3, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 3, 4), val_dtype=jnp.float32,
idx=(np.array([0, 1]), slice(None), np.array([0, 1])),
at_indices=((slice(0, 3),),)),
dict(testcase_name="ref_with_multiple_at",
ref_shape=(3, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 2), val_dtype=jnp.float32,
idx=(np.array([0, 1]), slice(None), np.array([0, 1])),
at_indices=((slice(0, 3),), (0,))),
dict(testcase_name="ref_with_multiple_at2",
ref_shape=(3, 3, 2, 4), ref_dtype=jnp.float32,
val_shape=(2, 2), val_dtype=jnp.float32,
idx=(np.array([0, 1]), slice(None), np.array([0, 1])),
at_indices=((slice(None), slice(0, 3),), (0,))),
)
def test_addupdate_abstract_eval(self, ref_shape, ref_dtype,
val_shape, val_dtype, idx, at_indices=(), should_error=False):
ref_aval = AbstractRef(core.ShapedArray(ref_shape, ref_dtype))
val_aval = core.ShapedArray(val_shape, val_dtype)
def f(x_ref, val):
for at_idx in at_indices:
x_ref = x_ref.at[at_idx]
ref_addupdate(x_ref, idx, val)
return []
if should_error:
with self.assertRaises(Exception):
pe.trace_to_jaxpr_dynamic(wrap_init(f, 2), [ref_aval, val_aval])
else:
jaxpr, out_avals, _ = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 2), [ref_aval, val_aval])
self.assertSetEqual(jaxpr.effects,
{AccumEffect(len(jaxpr.constvars))})
self.assertLen(out_avals, 0)
def test_addupdate_abstract_eval_must_take_in_refs(self):
ref_aval = core.ShapedArray((), jnp.float32)
val_aval = core.ShapedArray((), jnp.float32)
def f(x_ref, val):
return [ref_addupdate(x_ref, (), val)]
with self.assertRaises(ValueError):
pe.trace_to_jaxpr_dynamic(wrap_init(f, 2), [ref_aval, val_aval])
def test_can_represent_get_and_swap_in_jaxprs(self):
def body(x):
x[()] = jnp.int32(1)
x[()] = jnp.int32(2)
return (x[()],)
jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((), jnp.int32)])
self.assertLen(consts, 0)
self.assertListEqual(out_avals, [core.ShapedArray((), jnp.int32)])
self.assertEqual(jaxpr.eqns[0].primitive, swap_p)
self.assertEqual(jaxpr.eqns[1].primitive, swap_p)
self.assertEqual(jaxpr.eqns[2].primitive, get_p)
def test_can_represent_addupdate_in_jaxprs(self):
def body(x):
ref_addupdate(x, (), jnp.int32(1))
return (x[()],)
jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((), jnp.int32)])
self.assertLen(consts, 0)
self.assertListEqual(out_avals, [core.ShapedArray((), jnp.int32)])
self.assertEqual(jaxpr.eqns[0].primitive, addupdate_p)
def test_get_custom_pretty_printing_rule(self):
def body(x_ref):
x = x_ref[()]
return [x]
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((), jnp.int32)])
self.assertIn("b:i32[] <- a[...]", jaxpr.pretty_print(use_color=False))
def body(x_ref):
x = x_ref[:, 0]
return [x]
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((1, 2), jnp.int32)])
self.assertIn("b:i32[1] <- a[:,0]", jaxpr.pretty_print(use_color=False))
def test_set_custom_pretty_printing_rule(self):
def body(x_ref):
x_ref[()] = jnp.int32(2)
return []
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((), jnp.int32)])
self.assertIn("a[...] <- 2:i32[]", jaxpr.pretty_print(use_color=False))
def body(x_ref, val):
x_ref[:, 0] = val
return []
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 2), [shaped_array_ref((1, 2), jnp.int32),
core.ShapedArray((1,), jnp.int32)])
self.assertIn("a[:,0] <- b", jaxpr.pretty_print(use_color=False))
def test_swap_custom_pretty_printing_rule(self):
def body(x_ref):
x = ref_swap(x_ref, (), jnp.int32(2))
return [x]
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((), jnp.int32)])
self.assertIn("b:i32[], a[...] <- a[...], 2:i32[]", jaxpr.pretty_print(use_color=False))
def body(x_ref, val):
x = ref_swap(x_ref, (slice(None), 0), val)
return [x]
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 2), [shaped_array_ref((1, 2), jnp.int32),
core.ShapedArray((1,), jnp.int32)])
self.assertIn("c:i32[1], a[:,0] <- a[:,0], b",
jaxpr.pretty_print(use_color=False))
def test_addupdate_custom_pretty_printing_rule(self):
def body(x_ref):
ref_addupdate(x_ref, (), jnp.int32(2))
return []
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [shaped_array_ref((), jnp.int32)])
self.assertIn("a[...] += 2", jaxpr.pretty_print(use_color=False))
def body(x_ref, val):
ref_addupdate(x_ref, (slice(None), 0), val)
return []
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 2), [shaped_array_ref((1, 2), jnp.int32),
core.ShapedArray((1,), jnp.int32)])
self.assertIn("a[:,0] += b", jaxpr.pretty_print(use_color=False))
def test_get_jvp(self):
def f(r):
x = r[()]
return jnp.cos(x)
def g(r, rdot):
return jax.jvp(f, (r,), (rdot,))
in_avals = [shaped_array_ref((), jnp.dtype('float32')),
shaped_array_ref((), jnp.dtype('float32'))]
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(wrap_init(g, 2), in_avals)
self.assertEqual(jaxpr.eqns[0].primitive, get_p)
self.assertEqual(jaxpr.eqns[1].primitive, get_p)
def test_swap_jvp(self):
def f(a):
x = a[()]
a[()] = jnp.sin(x)
return a[()]
def g(r, rdot):
return jax.jvp(f, (r,), (rdot,))
in_avals = [shaped_array_ref((), jnp.dtype('float32')),
shaped_array_ref((), jnp.dtype('float32'))]
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(wrap_init(g, 2), in_avals)
self.assertEqual(jaxpr.eqns[0].primitive, get_p)
self.assertEqual(jaxpr.eqns[1].primitive, get_p)
self.assertEqual(jaxpr.eqns[2].primitive, lax.sin_p)
self.assertEqual(jaxpr.eqns[3].primitive, lax.cos_p)
self.assertEqual(jaxpr.eqns[4].primitive, lax.mul_p)
self.assertEqual(jaxpr.eqns[5].primitive, swap_p)
self.assertEqual(jaxpr.eqns[6].primitive, swap_p)
def test_addupdate_jvp(self):
def f(a):
ref_addupdate(a, (), jnp.float32(1.))
return a[()]
def g(r, rdot):
return jax.jvp(f, (r,), (rdot,))
in_avals = [shaped_array_ref((), jnp.dtype('float32')),
shaped_array_ref((), jnp.dtype('float32'))]
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(wrap_init(g, 2), in_avals)
self.assertEqual(jaxpr.eqns[0].primitive, addupdate_p)
self.assertEqual(jaxpr.eqns[1].primitive, addupdate_p)
self.assertEqual(jaxpr.eqns[2].primitive, get_p)
self.assertEqual(jaxpr.eqns[3].primitive, get_p)
@jtu.sample_product(
[dict(ref_shape=ref_shape, ref_bdim=ref_bdim, idx_shape=idx_shape,
indexed_dims=indexed_dims, idx_bdims=idx_bdims, out_bdim=out_bdim)
for ref_shape in [(1,), (2, 3), (4, 5, 6)]
for ref_bdim in range(1 + len(ref_shape))
for idx_shape in [(), (1,), (2,), (5, 6)]
for indexed_dims in it.product([True, False], repeat=len(ref_shape))
for idx_bdims in it.product([None, *range(1 + len(idx_shape))],
repeat=sum(indexed_dims))
for out_bdim in range(1 + len(ref_shape) - sum(indexed_dims)
+ len(idx_shape) * any(indexed_dims))
],
op=[
lambda x_ref, indexer: [x_ref[indexer]],
lambda x_ref, indexer: [
ref_swap(x_ref, indexer, jnp.ones_like(x_ref[indexer]))],
lambda x_ref, indexer: (
ref_addupdate(x_ref, indexer, jnp.ones_like(x_ref[indexer]))
or [jnp.ones_like(x_ref[indexer])]),
],
)
def test_vmap(self, ref_shape, ref_bdim, idx_shape, indexed_dims,
idx_bdims, out_bdim, op):
intx = dtypes.default_int_dtype()
floatx = dtypes.default_float_dtype()
axis_size = 7
def maybe_insert(shape, idx):
if idx is None:
return shape
return tuple_insert(shape, idx, axis_size)
batched_ref_shape = maybe_insert(ref_shape, ref_bdim)
ref_aval = shaped_array_ref(ref_shape, floatx)
bat_ref_aval = shaped_array_ref(batched_ref_shape, floatx)
idx_avals = [core.ShapedArray(idx_shape, intx)
for _ in idx_bdims]
bat_idx_avals = [
core.ShapedArray(maybe_insert(idx_shape, idx_bdim), intx)
for idx_bdim in idx_bdims]
def f(x_ref, *idxs):
idxs_ = iter(idxs)
indexer = tuple(next(idxs_) if b else slice(None) for b in indexed_dims)
return op(x_ref, indexer)
rng = self.rng()
a = rng.randn(*bat_ref_aval.shape).astype(floatx)
his = [d for d, b in zip(ref_aval.shape, indexed_dims) if b]
idxs = [rng.randint(low=0, high=hi, size=i.shape, dtype=intx)
for i, hi in zip(bat_idx_avals, his)]
# discharge-of-vmap
f_batched = jax.vmap(f, in_axes=(ref_bdim, *idx_bdims), out_axes=[out_bdim])
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f_batched, 1 + len(bat_idx_avals)), [bat_ref_aval, *bat_idx_avals])
jaxpr, consts = discharge_state(stateful_jaxpr, stateful_consts)
discharge_of_vmap_ans = core.eval_jaxpr(jaxpr, consts, a, *idxs)
# vmap-of-discharge
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 1 + len(idx_avals)), [ref_aval, *idx_avals])
jaxpr_, consts_ = discharge_state(stateful_jaxpr, stateful_consts)
f_batched = jax.vmap(partial(core.eval_jaxpr, jaxpr_, consts_),
in_axes=(ref_bdim, *idx_bdims),
out_axes=[out_bdim, ref_bdim])
vmap_of_discharge_ans = f_batched(a, *idxs)
self.assertAllClose(discharge_of_vmap_ans, vmap_of_discharge_ans,
check_dtypes=False)
| StatePrimitivesTest |
python | walkccc__LeetCode | solutions/1611. Minimum One Bit Operations to Make Integers Zero/1611.py | {
"start": 0,
"end": 1123
} | class ____:
def minimumOneBitOperations(self, n: int) -> int:
# Observation: e.g. n = 2^2
# 100 (2^2 needs 2^3 - 1 ops)
# op1 -> 101
# op2 -> 111
# op1 -> 110
# op2 -> 010 (2^1 needs 2^2 - 1 ops)
# op1 -> 011
# op2 -> 001 (2^0 needs 2^1 - 1 ops)
# op1 -> 000
#
# So 2^k needs 2^(k + 1) - 1 ops. Note this is reversible, i.e., 0 -> 2^k
# also takes 2^(k + 1) - 1 ops.
# e.g. n = 1XXX, our first goal is to change 1XXX -> 1100.
# - If the second bit is 1, you only need to consider the cost of turning
# the last 2 bits to 0.
# - If the second bit is 0, you need to add up the cost of flipping the
# second bit from 0 to 1.
# XOR determines the cost minimumOneBitOperations(1XXX^1100) accordingly.
# Then, 1100 -> 0100 needs 1 op. Finally, 0100 -> 0 needs 2^3 - 1 ops.
if n == 0:
return 0
# x is the largest 2^k <= n.
# x | x >> 1 -> x >> 1 needs 1 op.
# x >> 1 -> 0 needs x = 2^k - 1 ops.
x = 1 << n.bit_length() - 1
return self.minimumOneBitOperations(n ^ (x | x >> 1)) + 1 + x - 1
| Solution |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 28190,
"end": 28806
} | class ____(object):
def __init__(self,inst):
if isinstance(inst,TokenStream):
self.inst = inst
return
raise TypeError("TokenStreamIterator requires TokenStream object")
def next(self):
assert self.inst
item = self.inst.nextToken()
if not item or item.isEOF():
raise StopIteration()
return item
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamSelector ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| TokenStreamIterator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 605898,
"end": 606222
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("app", "context")
app = sgqlc.types.Field("App", graphql_name="app")
context = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="context")
| RequiredStatusCheckDescription |
python | huggingface__transformers | src/transformers/models/informer/modeling_informer.py | {
"start": 28468,
"end": 31781
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: InformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
if config.attention_type == "prob":
self.self_attn = InformerProbSparseAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
sampling_factor=config.sampling_factor,
)
else:
self.self_attn = InformerAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| InformerEncoderLayer |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/shortcuts/progress_bar/base.py | {
"start": 8743,
"end": 9954
} | class ____(UIControl):
"""
User control for the progress bar.
"""
def __init__(
self,
progress_bar: ProgressBar,
formatter: Formatter,
cancel_callback: Callable[[], None] | None,
) -> None:
self.progress_bar = progress_bar
self.formatter = formatter
self._key_bindings = create_key_bindings(cancel_callback)
def create_content(self, width: int, height: int) -> UIContent:
items: list[StyleAndTextTuples] = []
for pr in self.progress_bar.counters:
try:
text = self.formatter.format(self.progress_bar, pr, width)
except BaseException:
traceback.print_exc()
text = "ERROR"
items.append(to_formatted_text(text))
def get_line(i: int) -> StyleAndTextTuples:
return items[i]
return UIContent(get_line=get_line, line_count=len(items), show_cursor=False)
def is_focusable(self) -> bool:
return True # Make sure that the key bindings work.
def get_key_bindings(self) -> KeyBindings:
return self._key_bindings
_CounterItem = TypeVar("_CounterItem", covariant=True)
| _ProgressControl |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/redshift_cluster.py | {
"start": 20777,
"end": 23231
} | class ____(AwsBaseOperator[RedshiftHook]):
"""
Deletes the specified manual snapshot.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftDeleteClusterSnapshotOperator`
:param snapshot_identifier: A unique identifier for the snapshot that you are requesting
:param cluster_identifier: The unique identifier of the cluster the snapshot was created from
:param wait_for_completion: Whether wait for cluster deletion or not
The default value is ``True``
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check snapshot state
"""
aws_hook_class = RedshiftHook
template_fields: Sequence[str] = aws_template_fields(
"cluster_identifier",
"snapshot_identifier",
)
def __init__(
self,
*,
snapshot_identifier: str,
cluster_identifier: str,
wait_for_completion: bool = True,
poll_interval: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.snapshot_identifier = snapshot_identifier
self.cluster_identifier = cluster_identifier
self.wait_for_completion = wait_for_completion
self.poll_interval = poll_interval
def execute(self, context: Context) -> Any:
self.hook.conn.delete_cluster_snapshot(
SnapshotClusterIdentifier=self.cluster_identifier,
SnapshotIdentifier=self.snapshot_identifier,
)
if self.wait_for_completion:
while self.get_status() is not None:
time.sleep(self.poll_interval)
def get_status(self) -> str:
return self.hook.get_cluster_snapshot_status(
snapshot_identifier=self.snapshot_identifier,
)
| RedshiftDeleteClusterSnapshotOperator |
python | pydata__xarray | xarray/tests/test_coarsen.py | {
"start": 8607,
"end": 11877
} | class ____:
@pytest.mark.parametrize("dask", [True, False])
def test_coarsen_construct(self, dask: bool) -> None:
ds = Dataset(
{
"vart": ("time", np.arange(48), {"a": "b"}),
"varx": ("x", np.arange(10), {"a": "b"}),
"vartx": (("x", "time"), np.arange(480).reshape(10, 48), {"a": "b"}),
"vary": ("y", np.arange(12)),
},
coords={"time": np.arange(48), "y": np.arange(12)},
attrs={"foo": "bar"},
)
if dask and has_dask:
ds = ds.chunk({"x": 4, "time": 10})
expected = xr.Dataset(attrs={"foo": "bar"})
expected["vart"] = (
("year", "month"),
duck_array_ops.reshape(ds.vart.data, (-1, 12)),
{"a": "b"},
)
expected["varx"] = (
("x", "x_reshaped"),
duck_array_ops.reshape(ds.varx.data, (-1, 5)),
{"a": "b"},
)
expected["vartx"] = (
("x", "x_reshaped", "year", "month"),
duck_array_ops.reshape(ds.vartx.data, (2, 5, 4, 12)),
{"a": "b"},
)
expected["vary"] = ds.vary
expected.coords["time"] = (
("year", "month"),
duck_array_ops.reshape(ds.time.data, (-1, 12)),
)
with raise_if_dask_computes():
actual = ds.coarsen(time=12, x=5).construct(
{"time": ("year", "month"), "x": ("x", "x_reshaped")}
)
assert_identical(actual, expected)
with raise_if_dask_computes():
actual = ds.coarsen(time=12, x=5).construct(
time=("year", "month"), x=("x", "x_reshaped")
)
assert_identical(actual, expected)
with raise_if_dask_computes():
actual = ds.coarsen(time=12, x=5).construct(
{"time": ("year", "month"), "x": ("x", "x_reshaped")}, keep_attrs=False
)
for var in actual:
assert actual[var].attrs == {}
assert actual.attrs == {}
with raise_if_dask_computes():
actual = ds.vartx.coarsen(time=12, x=5).construct(
{"time": ("year", "month"), "x": ("x", "x_reshaped")}
)
assert_identical(actual, expected["vartx"])
with pytest.raises(ValueError):
ds.coarsen(time=12).construct(foo="bar")
with pytest.raises(ValueError):
ds.coarsen(time=12, x=2).construct(time=("year", "month"))
with pytest.raises(ValueError):
ds.coarsen(time=12).construct()
with pytest.raises(ValueError):
ds.coarsen(time=12).construct(time="bar")
with pytest.raises(ValueError):
ds.coarsen(time=12).construct(time=("bar",))
def test_coarsen_construct_keeps_all_coords(self):
da = xr.DataArray(np.arange(24), dims=["time"])
da = da.assign_coords(day=365 * da)
result = da.coarsen(time=12).construct(time=("year", "month"))
assert list(da.coords) == list(result.coords)
ds = da.to_dataset(name="T")
ds_result = ds.coarsen(time=12).construct(time=("year", "month"))
assert list(da.coords) == list(ds_result.coords)
| TestCoarsenConstruct |
python | ray-project__ray | rllib/offline/d4rl_reader.py | {
"start": 387,
"end": 1596
} | class ____(InputReader):
"""Reader object that loads the dataset from the D4RL dataset."""
@PublicAPI
def __init__(self, inputs: str, ioctx: IOContext = None):
"""Initializes a D4RLReader instance.
Args:
inputs: String corresponding to the D4RL environment name.
ioctx: Current IO context object.
"""
import d4rl
self.env = gym.make(inputs)
self.dataset = _convert_to_batch(d4rl.qlearning_dataset(self.env))
assert self.dataset.count >= 1
self.counter = 0
@override(InputReader)
def next(self) -> SampleBatchType:
if self.counter >= self.dataset.count:
self.counter = 0
self.counter += 1
return self.dataset.slice(start=self.counter, end=self.counter + 1)
def _convert_to_batch(dataset: Dict) -> SampleBatchType:
# Converts D4RL dataset to SampleBatch
d = {}
d[SampleBatch.OBS] = dataset["observations"]
d[SampleBatch.ACTIONS] = dataset["actions"]
d[SampleBatch.NEXT_OBS] = dataset["next_observations"]
d[SampleBatch.REWARDS] = dataset["rewards"]
d[SampleBatch.TERMINATEDS] = dataset["terminals"]
return SampleBatch(d)
| D4RLReader |
python | scipy__scipy | scipy/stats/tests/test_survival.py | {
"start": 18764,
"end": 21958
} | class ____:
@pytest.mark.parametrize(
"x, y, statistic, pvalue",
# Results validate with R
# library(survival)
# options(digits=16)
#
# futime_1 <- c(8, 12, 26, 14, 21, 27, 8, 32, 20, 40)
# fustat_1 <- c(1, 1, 1, 1, 1, 1, 0, 0, 0, 0)
# rx_1 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#
# futime_2 <- c(33, 28, 41, 48, 48, 25, 37, 48, 25, 43)
# fustat_2 <- c(1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
# rx_2 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
#
# futime <- c(futime_1, futime_2)
# fustat <- c(fustat_1, fustat_2)
# rx <- c(rx_1, rx_2)
#
# survdiff(formula = Surv(futime, fustat) ~ rx)
#
# Also check against another library which handle alternatives
# library(nph)
# logrank.test(futime, fustat, rx, alternative = "two.sided")
# res["test"]
[(
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
# uncensored, censored
[[8, 12, 26, 14, 21, 27], [8, 32, 20, 40]],
[[33, 28, 41], [48, 48, 25, 37, 48, 25, 43]],
# chi2, ["two-sided", "less", "greater"]
6.91598157449,
[0.008542873404, 0.9957285632979385, 0.004271436702061537]
),
(
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
[[19, 6, 5, 4], [20, 19, 17, 14]],
[[16, 21, 7], [21, 15, 18, 18, 5]],
0.835004855038,
[0.3608293039, 0.8195853480676912, 0.1804146519323088]
),
(
# Bland, Altman, "The logrank test", BMJ, 2004
# https://www.bmj.com/content/328/7447/1073.short
[[6, 13, 21, 30, 37, 38, 49, 50, 63, 79, 86, 98, 202, 219],
[31, 47, 80, 82, 82, 149]],
[[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24, 25, 28, 30,
33, 35, 37, 40, 40, 46, 48, 76, 81, 82, 91, 112, 181],
[34, 40, 70]],
7.49659416854,
[0.006181578637, 0.003090789318730882, 0.9969092106812691]
)]
)
def test_log_rank(self, x, y, statistic, pvalue):
x = stats.CensoredData(uncensored=x[0], right=x[1])
y = stats.CensoredData(uncensored=y[0], right=y[1])
for i, alternative in enumerate(["two-sided", "less", "greater"]):
res = stats.logrank(x=x, y=y, alternative=alternative)
# we return z and use the normal distribution while other framework
# return z**2. The p-value are directly comparable, but we have to
# square the statistic
assert_allclose(res.statistic**2, statistic, atol=1e-10)
assert_allclose(res.pvalue, pvalue[i], atol=1e-10)
def test_raises(self):
sample = stats.CensoredData([1, 2])
msg = r"`y` must be"
with pytest.raises(ValueError, match=msg):
stats.logrank(x=sample, y=[[1, 2]])
msg = r"`x` must be"
with pytest.raises(ValueError, match=msg):
stats.logrank(x=[[1, 2]], y=sample)
| TestLogRank |
python | ipython__ipython | examples/utils/cwd_prompt.py | {
"start": 139,
"end": 584
} | class ____(Prompts):
def in_prompt_tokens(self):
return [(Token, os.getcwd()), (Token.Prompt, ">>>")]
def load_ipython_extension(shell):
new_prompts = MyPrompt(shell)
new_prompts.old_prompts = shell.prompts
shell.prompts = new_prompts
def unload_ipython_extension(shell):
if not hasattr(shell.prompts, "old_prompts"):
print("cannot unload")
else:
shell.prompts = shell.prompts.old_prompts
| MyPrompt |
python | readthedocs__readthedocs.org | readthedocs/gold/forms.py | {
"start": 227,
"end": 469
} | class ____(forms.ModelForm):
"""Gold subscription form."""
class Meta:
model = GoldUser
fields = ["level"]
level = forms.ChoiceField(
required=True,
choices=LEVEL_CHOICES,
)
| GoldSubscriptionForm |
python | redis__redis-py | tests/test_asyncio/test_scripting.py | {
"start": 521,
"end": 5444
} | class ____:
@pytest_asyncio.fixture
async def r(self, create_redis):
redis = await create_redis()
yield redis
await redis.script_flush()
@pytest.mark.asyncio()
async def test_eval(self, r):
await r.flushdb()
await r.set("a", 2)
# 2 * 3 == 6
assert await r.eval(multiply_script, 1, "a", 3) == 6
@pytest.mark.asyncio()
@skip_if_server_version_lt("6.2.0")
async def test_script_flush(self, r):
await r.set("a", 2)
await r.script_load(multiply_script)
await r.script_flush("ASYNC")
await r.set("a", 2)
await r.script_load(multiply_script)
await r.script_flush("SYNC")
await r.set("a", 2)
await r.script_load(multiply_script)
await r.script_flush()
with pytest.raises(exceptions.DataError):
await r.set("a", 2)
await r.script_load(multiply_script)
await r.script_flush("NOTREAL")
@pytest.mark.asyncio()
async def test_evalsha(self, r):
await r.set("a", 2)
sha = await r.script_load(multiply_script)
# 2 * 3 == 6
assert await r.evalsha(sha, 1, "a", 3) == 6
@pytest.mark.asyncio()
async def test_evalsha_script_not_loaded(self, r):
await r.set("a", 2)
sha = await r.script_load(multiply_script)
# remove the script from Redis's cache
await r.script_flush()
with pytest.raises(exceptions.NoScriptError):
await r.evalsha(sha, 1, "a", 3)
@pytest.mark.asyncio()
async def test_script_loading(self, r):
# get the sha, then clear the cache
sha = await r.script_load(multiply_script)
await r.script_flush()
assert await r.script_exists(sha) == [False]
await r.script_load(multiply_script)
assert await r.script_exists(sha) == [True]
@pytest.mark.asyncio()
async def test_script_object(self, r):
await r.script_flush()
await r.set("a", 2)
multiply = r.register_script(multiply_script)
precalculated_sha = multiply.sha
assert precalculated_sha
assert await r.script_exists(multiply.sha) == [False]
# Test second evalsha block (after NoScriptError)
assert await multiply(keys=["a"], args=[3]) == 6
# At this point, the script should be loaded
assert await r.script_exists(multiply.sha) == [True]
# Test that the precalculated sha matches the one from redis
assert multiply.sha == precalculated_sha
# Test first evalsha block
assert await multiply(keys=["a"], args=[3]) == 6
@pytest.mark.asyncio()
async def test_script_object_in_pipeline(self, r):
await r.script_flush()
multiply = r.register_script(multiply_script)
precalculated_sha = multiply.sha
assert precalculated_sha
pipe = r.pipeline()
pipe.set("a", 2)
pipe.get("a")
await multiply(keys=["a"], args=[3], client=pipe)
assert await r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
assert await pipe.execute() == [True, b"2", 6]
# The script should have been loaded by pipe.execute()
assert await r.script_exists(multiply.sha) == [True]
# The precalculated sha should have been the correct one
assert multiply.sha == precalculated_sha
# purge the script from redis's cache and re-run the pipeline
# the multiply script should be reloaded by pipe.execute()
await r.script_flush()
pipe = r.pipeline()
pipe.set("a", 2)
pipe.get("a")
await multiply(keys=["a"], args=[3], client=pipe)
assert await r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
assert await pipe.execute() == [True, b"2", 6]
assert await r.script_exists(multiply.sha) == [True]
@pytest.mark.asyncio()
async def test_eval_msgpack_pipeline_error_in_lua(self, r):
msgpack_hello = r.register_script(msgpack_hello_script)
assert msgpack_hello.sha
pipe = r.pipeline()
# avoiding a dependency to msgpack, this is the output of
# msgpack.dumps({"name": "joe"})
msgpack_message_1 = b"\x81\xa4name\xa3Joe"
await msgpack_hello(args=[msgpack_message_1], client=pipe)
assert await r.script_exists(msgpack_hello.sha) == [False]
assert (await pipe.execute())[0] == b"hello Joe"
assert await r.script_exists(msgpack_hello.sha) == [True]
msgpack_hello_broken = r.register_script(msgpack_hello_script_broken)
await msgpack_hello_broken(args=[msgpack_message_1], client=pipe)
with pytest.raises(exceptions.ResponseError) as excinfo:
await pipe.execute()
assert excinfo.type == exceptions.ResponseError
| TestScripting |
python | fastapi__sqlmodel | docs_src/tutorial/delete/tutorial002.py | {
"start": 100,
"end": 2907
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def update_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Boy")
results = session.exec(statement)
hero_1 = results.one()
print("Hero 1:", hero_1)
statement = select(Hero).where(Hero.name == "Captain North America")
results = session.exec(statement)
hero_2 = results.one()
print("Hero 2:", hero_2)
hero_1.age = 16
hero_1.name = "Spider-Youngster"
session.add(hero_1)
hero_2.name = "Captain North America Except Canada"
hero_2.age = 110
session.add(hero_2)
session.commit()
session.refresh(hero_1)
session.refresh(hero_2)
print("Updated hero 1:", hero_1)
print("Updated hero 2:", hero_2)
def delete_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Youngster") # (1)!
results = session.exec(statement) # (2)!
hero = results.one() # (3)!
print("Hero: ", hero) # (4)!
session.delete(hero) # (5)!
session.commit() # (6)!
print("Deleted hero:", hero) # (7)!
statement = select(Hero).where(Hero.name == "Spider-Youngster") # (8)!
results = session.exec(statement) # (9)!
hero = results.first() # (10)!
if hero is None: # (11)!
print("There's no hero named Spider-Youngster") # (12)!
# (13)!
def main():
create_db_and_tables()
create_heroes()
update_heroes()
delete_heroes()
if __name__ == "__main__":
main()
| Hero |
python | run-llama__llama_index | llama-index-core/llama_index/core/readers/file/base.py | {
"start": 832,
"end": 5011
} | class ____(ABC):
@abstractmethod
def read_file_content(self, input_file: Path, **kwargs: Any) -> bytes:
"""
Read the bytes content of a file.
Args:
input_file (Path): Path to the file.
Returns:
bytes: File content.
"""
async def aread_file_content(
self, input_file: Path, **kwargs: Any
) -> bytes: # pragma: no cover
"""
A thin wrapper around read_file_content.
Args:
input_file (Path): Path to the file.
Returns:
bytes: File content.
"""
return self.read_file_content(input_file, **kwargs)
def _try_loading_included_file_formats() -> dict[
str, Type[BaseReader]
]: # pragma: no cover
try:
from llama_index.readers.file import (
DocxReader,
EpubReader,
HWPReader,
ImageReader,
IPYNBReader,
MboxReader,
PandasCSVReader,
PandasExcelReader,
PDFReader,
PptxReader,
VideoAudioReader,
) # pants: no-infer-dep
except ImportError:
logger.warning(
"`llama-index-readers-file` package not found, some file readers will not be available "
"if not provided by the `file_extractor` parameter."
)
return {}
default_file_reader_cls: dict[str, Type[BaseReader]] = {
".hwp": HWPReader,
".pdf": PDFReader,
".docx": DocxReader,
".pptx": PptxReader,
".ppt": PptxReader,
".pptm": PptxReader,
".gif": ImageReader,
".jpg": ImageReader,
".png": ImageReader,
".jpeg": ImageReader,
".webp": ImageReader,
".mp3": VideoAudioReader,
".mp4": VideoAudioReader,
".csv": PandasCSVReader,
".epub": EpubReader,
".mbox": MboxReader,
".ipynb": IPYNBReader,
".xls": PandasExcelReader,
".xlsx": PandasExcelReader,
}
return default_file_reader_cls
def _format_file_timestamp(
timestamp: float | None, include_time: bool = False
) -> str | None:
"""
Format file timestamp to a string.
The format will be %Y-%m-%d if include_time is False or missing,
%Y-%m-%dT%H:%M:%SZ if include_time is True.
Args:
timestamp (float): timestamp in float
include_time (bool): whether to include time in the formatted string
Returns:
str: formatted timestamp
None: if the timestamp passed was None
"""
if timestamp is None:
return None
# Convert timestamp to UTC
# Check if timestamp is already a datetime object
if isinstance(timestamp, datetime):
timestamp_dt = timestamp.astimezone(timezone.utc)
else:
timestamp_dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
if include_time:
return timestamp_dt.strftime("%Y-%m-%dT%H:%M:%SZ")
return timestamp_dt.strftime("%Y-%m-%d")
def default_file_metadata_func(
file_path: str, fs: fsspec.AbstractFileSystem | None = None
) -> dict:
"""
Get some handy metadata from filesystem.
Args:
file_path: str: file path in str
"""
fs = fs or get_default_fs()
stat_result = fs.stat(file_path)
try:
file_name = os.path.basename(str(stat_result["name"]))
except Exception as e:
file_name = os.path.basename(file_path)
creation_date = _format_file_timestamp(stat_result.get("created"))
last_modified_date = _format_file_timestamp(stat_result.get("mtime"))
last_accessed_date = _format_file_timestamp(stat_result.get("atime"))
default_meta = {
"file_path": file_path,
"file_name": file_name,
"file_type": mimetypes.guess_type(file_path)[0],
"file_size": stat_result.get("size"),
"creation_date": creation_date,
"last_modified_date": last_modified_date,
"last_accessed_date": last_accessed_date,
}
# Return not null value
return {
meta_key: meta_value
for meta_key, meta_value in default_meta.items()
if meta_value is not None
}
| FileSystemReaderMixin |
python | numpy__numpy | numpy/lib/_user_array_impl.py | {
"start": 791,
"end": 8026
} | class ____:
"""
container(data, dtype=None, copy=True)
Standard container-class for easy multiple-inheritance.
Methods
-------
copy
byteswap
astype
"""
def __init_subclass__(cls) -> None:
# Deprecated in NumPy 2.4, 2025-11-24
import warnings
warnings.warn(
"The numpy.lib.user_array.container class is deprecated and will be "
"removed in a future version.",
DeprecationWarning,
stacklevel=2,
)
def __init__(self, data, dtype=None, copy=True):
self.array = array(data, dtype, copy=copy)
def __repr__(self):
if self.ndim > 0:
return self.__class__.__name__ + repr(self.array)[len("array"):]
else:
return self.__class__.__name__ + "(" + repr(self.array) + ")"
def __array__(self, t=None):
if t:
return self.array.astype(t)
return self.array
# Array as sequence
def __len__(self):
return len(self.array)
def __getitem__(self, index):
return self._rc(self.array[index])
def __setitem__(self, index, value):
self.array[index] = asarray(value, self.dtype)
def __abs__(self):
return self._rc(absolute(self.array))
def __neg__(self):
return self._rc(-self.array)
def __add__(self, other):
return self._rc(self.array + asarray(other))
__radd__ = __add__
def __iadd__(self, other):
add(self.array, other, self.array)
return self
def __sub__(self, other):
return self._rc(self.array - asarray(other))
def __rsub__(self, other):
return self._rc(asarray(other) - self.array)
def __isub__(self, other):
subtract(self.array, other, self.array)
return self
def __mul__(self, other):
return self._rc(multiply(self.array, asarray(other)))
__rmul__ = __mul__
def __imul__(self, other):
multiply(self.array, other, self.array)
return self
def __mod__(self, other):
return self._rc(remainder(self.array, other))
def __rmod__(self, other):
return self._rc(remainder(other, self.array))
def __imod__(self, other):
remainder(self.array, other, self.array)
return self
def __divmod__(self, other):
return (self._rc(divide(self.array, other)),
self._rc(remainder(self.array, other)))
def __rdivmod__(self, other):
return (self._rc(divide(other, self.array)),
self._rc(remainder(other, self.array)))
def __pow__(self, other):
return self._rc(power(self.array, asarray(other)))
def __rpow__(self, other):
return self._rc(power(asarray(other), self.array))
def __ipow__(self, other):
power(self.array, other, self.array)
return self
def __lshift__(self, other):
return self._rc(left_shift(self.array, other))
def __rshift__(self, other):
return self._rc(right_shift(self.array, other))
def __rlshift__(self, other):
return self._rc(left_shift(other, self.array))
def __rrshift__(self, other):
return self._rc(right_shift(other, self.array))
def __ilshift__(self, other):
left_shift(self.array, other, self.array)
return self
def __irshift__(self, other):
right_shift(self.array, other, self.array)
return self
def __and__(self, other):
return self._rc(bitwise_and(self.array, other))
def __rand__(self, other):
return self._rc(bitwise_and(other, self.array))
def __iand__(self, other):
bitwise_and(self.array, other, self.array)
return self
def __xor__(self, other):
return self._rc(bitwise_xor(self.array, other))
def __rxor__(self, other):
return self._rc(bitwise_xor(other, self.array))
def __ixor__(self, other):
bitwise_xor(self.array, other, self.array)
return self
def __or__(self, other):
return self._rc(bitwise_or(self.array, other))
def __ror__(self, other):
return self._rc(bitwise_or(other, self.array))
def __ior__(self, other):
bitwise_or(self.array, other, self.array)
return self
def __pos__(self):
return self._rc(self.array)
def __invert__(self):
return self._rc(invert(self.array))
def _scalarfunc(self, func):
if self.ndim == 0:
return func(self[0])
else:
raise TypeError(
"only rank-0 arrays can be converted to Python scalars.")
def __complex__(self):
return self._scalarfunc(complex)
def __float__(self):
return self._scalarfunc(float)
def __int__(self):
return self._scalarfunc(int)
def __hex__(self):
return self._scalarfunc(hex)
def __oct__(self):
return self._scalarfunc(oct)
def __lt__(self, other):
return self._rc(less(self.array, other))
def __le__(self, other):
return self._rc(less_equal(self.array, other))
def __eq__(self, other):
return self._rc(equal(self.array, other))
def __ne__(self, other):
return self._rc(not_equal(self.array, other))
def __gt__(self, other):
return self._rc(greater(self.array, other))
def __ge__(self, other):
return self._rc(greater_equal(self.array, other))
def copy(self):
""
return self._rc(self.array.copy())
def tobytes(self):
""
return self.array.tobytes()
def byteswap(self):
""
return self._rc(self.array.byteswap())
def astype(self, typecode):
""
return self._rc(self.array.astype(typecode))
def _rc(self, a):
if len(shape(a)) == 0:
return a
else:
return self.__class__(a)
def __array_wrap__(self, *args):
return self.__class__(args[0])
def __setattr__(self, attr, value):
if attr == 'array':
object.__setattr__(self, attr, value)
return
try:
self.array.__setattr__(attr, value)
except AttributeError:
object.__setattr__(self, attr, value)
# Only called after other approaches fail.
def __getattr__(self, attr):
if (attr == 'array'):
return object.__getattribute__(self, attr)
return self.array.__getattribute__(attr)
#############################################################
# Test of class container
#############################################################
if __name__ == '__main__':
temp = reshape(arange(10000), (100, 100))
ua = container(temp)
# new object created begin test
print(dir(ua))
print(shape(ua), ua.shape) # I have changed Numeric.py
ua_small = ua[:3, :5]
print(ua_small)
# this did not change ua[0,0], which is not normal behavior
ua_small[0, 0] = 10
print(ua_small[0, 0], ua[0, 0])
print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
print(less(ua_small, 103), type(less(ua_small, 103)))
print(type(ua_small * reshape(arange(15), shape(ua_small))))
print(reshape(ua_small, (5, 3)))
print(transpose(ua_small))
| container |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 48125,
"end": 48368
} | class ____(Response):
"""
Response of events.download_task_log endpoint.
"""
_service = "events"
_action = "download_task_log"
_version = "2.9"
_schema = {"definitions": {}, "type": "string"}
| DownloadTaskLogResponse |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/settings.py | {
"start": 665,
"end": 1086
} | class ____(PrefectBaseSettings):
model_config = build_settings_config(("integrations", "aws", "ecs", "observer"))
enabled: bool = Field(
default=True,
description="Whether to enable the ECS observer.",
)
sqs: EcsObserverSqsSettings = Field(
description="Settings for controlling ECS observer SQS behavior.",
default_factory=EcsObserverSqsSettings,
)
| EcsObserverSettings |
python | getsentry__sentry | tests/sentry/api/serializers/test_base.py | {
"start": 511,
"end": 893
} | class ____(Serializer):
def get_attrs(self, item_list, user, **kwargs):
return {item: {"child_data": Foo()} for item in item_list}
def serialize(self, obj, attrs, user, **kwargs):
return {
"parent": "something",
"child": serialize(attrs["child_data"], serializer=FailingChildSerializer()),
}
@control_silo_test
| ParentSerializer |
python | simplejson__simplejson | simplejson/ordered_dict.py | {
"start": 154,
"end": 2945
} | class ____(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
key = reversed(self).next() if last else iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
| OrderedDict |
python | walkccc__LeetCode | solutions/242. Valid Anagram/242.py | {
"start": 0,
"end": 239
} | class ____:
def isAnagram(self, s: str, t: str) -> bool:
if len(s) != len(t):
return False
count = collections.Counter(s)
count.subtract(collections.Counter(t))
return all(freq == 0 for freq in count.values())
| Solution |
python | numba__numba | numba/cpython/unicode_support.py | {
"start": 13957,
"end": 22215
} | class ____(IntEnum):
LOWER = 0x01
UPPER = 0x02
ALPHA = 0x01 | 0x02
DIGIT = 0x04
ALNUM = 0x01 | 0x02 | 0x04
SPACE = 0x08
XDIGIT = 0x10
# From the definition in CPython's Python/pyctype.c
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L5 # noqa: E501
_Py_ctype_table = np.array([
0, # 0x0 '\x00'
0, # 0x1 '\x01'
0, # 0x2 '\x02'
0, # 0x3 '\x03'
0, # 0x4 '\x04'
0, # 0x5 '\x05'
0, # 0x6 '\x06'
0, # 0x7 '\x07'
0, # 0x8 '\x08'
_PY_CTF.SPACE, # 0x9 '\t'
_PY_CTF.SPACE, # 0xa '\n'
_PY_CTF.SPACE, # 0xb '\v'
_PY_CTF.SPACE, # 0xc '\f'
_PY_CTF.SPACE, # 0xd '\r'
0, # 0xe '\x0e'
0, # 0xf '\x0f'
0, # 0x10 '\x10'
0, # 0x11 '\x11'
0, # 0x12 '\x12'
0, # 0x13 '\x13'
0, # 0x14 '\x14'
0, # 0x15 '\x15'
0, # 0x16 '\x16'
0, # 0x17 '\x17'
0, # 0x18 '\x18'
0, # 0x19 '\x19'
0, # 0x1a '\x1a'
0, # 0x1b '\x1b'
0, # 0x1c '\x1c'
0, # 0x1d '\x1d'
0, # 0x1e '\x1e'
0, # 0x1f '\x1f'
_PY_CTF.SPACE, # 0x20 ' '
0, # 0x21 '!'
0, # 0x22 '"'
0, # 0x23 '#'
0, # 0x24 '$'
0, # 0x25 '%'
0, # 0x26 '&'
0, # 0x27 "'"
0, # 0x28 '('
0, # 0x29 ')'
0, # 0x2a '*'
0, # 0x2b '+'
0, # 0x2c ','
0, # 0x2d '-'
0, # 0x2e '.'
0, # 0x2f '/'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x30 '0'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x31 '1'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x32 '2'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x33 '3'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x34 '4'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x35 '5'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x36 '6'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x37 '7'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x38 '8'
_PY_CTF.DIGIT | _PY_CTF.XDIGIT, # 0x39 '9'
0, # 0x3a ':'
0, # 0x3b ';'
0, # 0x3c '<'
0, # 0x3d '='
0, # 0x3e '>'
0, # 0x3f '?'
0, # 0x40 '@'
_PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x41 'A'
_PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x42 'B'
_PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x43 'C'
_PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x44 'D'
_PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x45 'E'
_PY_CTF.UPPER | _PY_CTF.XDIGIT, # 0x46 'F'
_PY_CTF.UPPER, # 0x47 'G'
_PY_CTF.UPPER, # 0x48 'H'
_PY_CTF.UPPER, # 0x49 'I'
_PY_CTF.UPPER, # 0x4a 'J'
_PY_CTF.UPPER, # 0x4b 'K'
_PY_CTF.UPPER, # 0x4c 'L'
_PY_CTF.UPPER, # 0x4d 'M'
_PY_CTF.UPPER, # 0x4e 'N'
_PY_CTF.UPPER, # 0x4f 'O'
_PY_CTF.UPPER, # 0x50 'P'
_PY_CTF.UPPER, # 0x51 'Q'
_PY_CTF.UPPER, # 0x52 'R'
_PY_CTF.UPPER, # 0x53 'S'
_PY_CTF.UPPER, # 0x54 'T'
_PY_CTF.UPPER, # 0x55 'U'
_PY_CTF.UPPER, # 0x56 'V'
_PY_CTF.UPPER, # 0x57 'W'
_PY_CTF.UPPER, # 0x58 'X'
_PY_CTF.UPPER, # 0x59 'Y'
_PY_CTF.UPPER, # 0x5a 'Z'
0, # 0x5b '['
0, # 0x5c '\\'
0, # 0x5d ']'
0, # 0x5e '^'
0, # 0x5f '_'
0, # 0x60 '`'
_PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x61 'a'
_PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x62 'b'
_PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x63 'c'
_PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x64 'd'
_PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x65 'e'
_PY_CTF.LOWER | _PY_CTF.XDIGIT, # 0x66 'f'
_PY_CTF.LOWER, # 0x67 'g'
_PY_CTF.LOWER, # 0x68 'h'
_PY_CTF.LOWER, # 0x69 'i'
_PY_CTF.LOWER, # 0x6a 'j'
_PY_CTF.LOWER, # 0x6b 'k'
_PY_CTF.LOWER, # 0x6c 'l'
_PY_CTF.LOWER, # 0x6d 'm'
_PY_CTF.LOWER, # 0x6e 'n'
_PY_CTF.LOWER, # 0x6f 'o'
_PY_CTF.LOWER, # 0x70 'p'
_PY_CTF.LOWER, # 0x71 'q'
_PY_CTF.LOWER, # 0x72 'r'
_PY_CTF.LOWER, # 0x73 's'
_PY_CTF.LOWER, # 0x74 't'
_PY_CTF.LOWER, # 0x75 'u'
_PY_CTF.LOWER, # 0x76 'v'
_PY_CTF.LOWER, # 0x77 'w'
_PY_CTF.LOWER, # 0x78 'x'
_PY_CTF.LOWER, # 0x79 'y'
_PY_CTF.LOWER, # 0x7a 'z'
0, # 0x7b '{'
0, # 0x7c '|'
0, # 0x7d '}'
0, # 0x7e '~'
0, # 0x7f '\x7f'
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
], dtype=np.intc)
# From the definition in CPython's Python/pyctype.c
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L145 # noqa: E501
_Py_ctype_tolower = np.array([
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
], dtype=np.uint8)
# From the definition in CPython's Python/pyctype.c
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Python/pyctype.c#L180
_Py_ctype_toupper = np.array([
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
], dtype=np.uint8)
| _PY_CTF |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_comprehend.py | {
"start": 1236,
"end": 2191
} | class ____(BaseAwsLinksTestCase):
link_class = ComprehendPiiEntitiesDetectionLink
def test_extra_link(self, mock_supervisor_comms):
test_job_id = "123-345-678"
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "eu-west-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"job_id": test_job_id,
},
)
self.assert_extra_link_url(
expected_url=(
f"https://console.aws.amazon.com/comprehend/home?region=eu-west-1#/analysis-job-details/pii/{test_job_id}"
),
region_name="eu-west-1",
aws_partition="aws",
job_id=test_job_id,
)
| TestComprehendPiiEntitiesDetectionLink |
python | streamlit__streamlit | lib/streamlit/vendor/pympler/asizeof.py | {
"start": 49007,
"end": 50203
} | class ____(object):
"""Internal largest object class."""
deep = 0 # recursion depth
id = 0 # id(obj)
key = None # Typedef
objref = None # obj or Weakref.ref(obj)
pid = 0 # id(parent obj)
size = 0 # size in bytes
weak = False # objref is Weakref.ref
def __init__(self, key, obj, size, deep, pid):
self.deep = deep
self.id = id(obj)
self.key = key
try: # prefer using weak ref
self.objref, self.weak = Weakref.ref(obj), True
except TypeError:
self.objref, self.weak = obj, False
self.pid = pid
self.size = size
def format(self, clip=0, id2x={}):
"""Return this *rank* as string."""
def _ix(_id): # id or parent_id
return id2x.get(_id, "?")
o = self.objref() if self.weak else self.objref
d = (" (at %s)" % (self.deep,)) if self.deep > 0 else _NN
p = (", pix %s" % (_ix(self.pid),)) if self.pid else _NN
return "%s: %s%s, ix %s%s%s" % (
_prepr(self.key, clip=clip),
_repr(o, clip=clip),
_lengstr(o),
_ix(self.id),
d,
p,
)
| _Rank |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_modelresource/test_data_deletion.py | {
"start": 203,
"end": 6300
} | class ____(TestCase):
def setUp(self):
self.resource = BookResource()
self.book = Book.objects.create(name="Some book")
self.dataset = tablib.Dataset(headers=["id", "name", "author_email", "price"])
row = [self.book.pk, "Some book", "test@example.com", "10.25"]
self.dataset.append(row)
def test_import_data_delete(self):
class B(BookResource):
delete = fields.Field(widget=widgets.BooleanWidget())
def for_delete(self, row, instance):
return self.fields["delete"].clean(row)
row = [self.book.pk, self.book.name, "1"]
dataset = tablib.Dataset(*[row], headers=["id", "name", "delete"])
result = B().import_data(dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(
result.rows[0].import_type, results.RowResult.IMPORT_TYPE_DELETE
)
self.assertFalse(Book.objects.filter(pk=self.book.pk))
self.assertIsNone(result.rows[0].instance)
self.assertIsNone(result.rows[0].original)
def test_import_data_delete_store_instance(self):
class B(BookResource):
delete = fields.Field(widget=widgets.BooleanWidget())
def for_delete(self, row, instance):
return self.fields["delete"].clean(row)
class Meta:
store_instance = True
row = [self.book.pk, self.book.name, "1"]
dataset = tablib.Dataset(*[row], headers=["id", "name", "delete"])
result = B().import_data(dataset, raise_errors=True)
self.assertEqual(
result.rows[0].import_type, results.RowResult.IMPORT_TYPE_DELETE
)
self.assertIsNotNone(result.rows[0].instance)
def test_save_instance_with_dry_run_flag(self):
class B(BookResource):
def before_save_instance(self, instance, row, **kwargs):
super().before_save_instance(instance, row, **kwargs)
dry_run = kwargs.get("dry_run", False)
if dry_run:
self.before_save_instance_dry_run = True
else:
self.before_save_instance_dry_run = False
def save_instance(self, instance, new, row, **kwargs):
super().save_instance(instance, new, row, **kwargs)
dry_run = kwargs.get("dry_run", False)
if dry_run:
self.save_instance_dry_run = True
else:
self.save_instance_dry_run = False
def after_save_instance(self, instance, row, **kwargs):
super().after_save_instance(instance, row, **kwargs)
dry_run = kwargs.get("dry_run", False)
if dry_run:
self.after_save_instance_dry_run = True
else:
self.after_save_instance_dry_run = False
resource = B()
resource.import_data(self.dataset, dry_run=True, raise_errors=True)
self.assertTrue(resource.before_save_instance_dry_run)
self.assertTrue(resource.save_instance_dry_run)
self.assertTrue(resource.after_save_instance_dry_run)
resource.import_data(self.dataset, dry_run=False, raise_errors=True)
self.assertFalse(resource.before_save_instance_dry_run)
self.assertFalse(resource.save_instance_dry_run)
self.assertFalse(resource.after_save_instance_dry_run)
@mock.patch("core.models.Book.save")
def test_save_instance_noop(self, mock_book):
book = Book.objects.first()
self.resource.save_instance(
book, False, None, using_transactions=False, dry_run=True
)
self.assertEqual(0, mock_book.call_count)
@mock.patch("core.models.Book.save")
def test_delete_instance_noop(self, mock_book):
book = Book.objects.first()
self.resource.delete_instance(
book, None, using_transactions=False, dry_run=True
)
self.assertEqual(0, mock_book.call_count)
def test_delete_instance_with_dry_run_flag(self):
class B(BookResource):
delete = fields.Field(widget=widgets.BooleanWidget())
def for_delete(self, row, instance):
return self.fields["delete"].clean(row)
def before_delete_instance(self, instance, row, **kwargs):
super().before_delete_instance(instance, row, **kwargs)
dry_run = kwargs.get("dry_run", False)
if dry_run:
self.before_delete_instance_dry_run = True
else:
self.before_delete_instance_dry_run = False
def delete_instance(self, instance, row, **kwargs):
super().delete_instance(instance, row, **kwargs)
dry_run = kwargs.get("dry_run", False)
if dry_run:
self.delete_instance_dry_run = True
else:
self.delete_instance_dry_run = False
def after_delete_instance(self, instance, row, **kwargs):
super().after_delete_instance(instance, row, **kwargs)
dry_run = kwargs.get("dry_run", False)
if dry_run:
self.after_delete_instance_dry_run = True
else:
self.after_delete_instance_dry_run = False
resource = B()
row = [self.book.pk, self.book.name, "1"]
dataset = tablib.Dataset(*[row], headers=["id", "name", "delete"])
resource.import_data(dataset, dry_run=True, raise_errors=True)
self.assertTrue(resource.before_delete_instance_dry_run)
self.assertTrue(resource.delete_instance_dry_run)
self.assertTrue(resource.after_delete_instance_dry_run)
resource.import_data(dataset, dry_run=False, raise_errors=True)
self.assertFalse(resource.before_delete_instance_dry_run)
self.assertFalse(resource.delete_instance_dry_run)
self.assertFalse(resource.after_delete_instance_dry_run)
| DataDeletionDryRunTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/bigquery.py | {
"start": 3321,
"end": 61920
} | class ____(GoogleBaseHook, DbApiHook):
"""
Interact with BigQuery.
This hook uses the Google Cloud connection.
:param gcp_conn_id: The Airflow connection used for GCP credentials.
:param use_legacy_sql: This specifies whether to use legacy SQL dialect.
:param location: The location of the BigQuery resource.
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:param api_resource_configs: This contains params configuration applied for
Google BigQuery jobs.
:param impersonation_chain: This is the optional service account to
impersonate using short term credentials.
:param impersonation_scopes: Optional list of scopes for impersonated account.
Will override scopes from connection.
:param labels: The BigQuery resource label.
"""
conn_name_attr = "gcp_conn_id"
default_conn_name = "google_cloud_bigquery_default"
conn_type = "gcpbigquery"
hook_name = "Google Bigquery"
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import validators
from wtforms.fields.simple import BooleanField, StringField
from airflow.providers.google.cloud.utils.validators import ValidJson
connection_form_widgets = super().get_connection_form_widgets()
connection_form_widgets["use_legacy_sql"] = BooleanField(lazy_gettext("Use Legacy SQL"))
connection_form_widgets["location"] = StringField(
lazy_gettext("Location"), widget=BS3TextFieldWidget()
)
connection_form_widgets["priority"] = StringField(
lazy_gettext("Priority"),
default="INTERACTIVE",
widget=BS3TextFieldWidget(),
validators=[validators.AnyOf(["INTERACTIVE", "BATCH"])],
)
connection_form_widgets["api_resource_configs"] = StringField(
lazy_gettext("API Resource Configs"), widget=BS3TextFieldWidget(), validators=[ValidJson()]
)
connection_form_widgets["labels"] = StringField(
lazy_gettext("Labels"), widget=BS3TextFieldWidget(), validators=[ValidJson()]
)
connection_form_widgets["labels"] = StringField(
lazy_gettext("Labels"), widget=BS3TextFieldWidget(), validators=[ValidJson()]
)
return connection_form_widgets
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return super().get_ui_field_behaviour()
def __init__(
self,
use_legacy_sql: bool | object = _UNSET,
location: str | None | object = _UNSET,
priority: str | object = _UNSET,
api_resource_configs: dict | None | object = _UNSET,
impersonation_scopes: str | Sequence[str] | None = None,
labels: dict | None | object = _UNSET,
**kwargs,
) -> None:
super().__init__(**kwargs)
# Use sentinel pattern to distinguish "not provided" from "explicitly provided"
if use_legacy_sql is _UNSET:
value = self._get_field("use_legacy_sql", _UNSET)
self.use_legacy_sql: bool = value if value is not None else True
else:
self.use_legacy_sql = use_legacy_sql # type: ignore[assignment]
if location is _UNSET:
self.location: str | None = self._get_field("location", _UNSET)
else:
self.location = location # type: ignore[assignment]
if priority is _UNSET:
value = self._get_field("priority", _UNSET)
self.priority: str = value if value is not None else "INTERACTIVE"
else:
self.priority = priority # type: ignore[assignment]
self.running_job_id: str | None = None
if api_resource_configs is _UNSET:
value = self._get_field("api_resource_configs", _UNSET)
self.api_resource_configs: dict = value if value is not None else {}
else:
self.api_resource_configs = api_resource_configs or {} # type: ignore[assignment]
if labels is _UNSET:
value = self._get_field("labels", _UNSET)
self.labels = value if value is not None else {}
else:
self.labels = labels or {} # type: ignore[assignment]
self.impersonation_scopes: str | Sequence[str] | None = impersonation_scopes
def get_conn(self) -> BigQueryConnection:
"""Get a BigQuery PEP 249 connection object."""
http_authorized = self._authorize()
service = build("bigquery", "v2", http=http_authorized, cache_discovery=False)
return BigQueryConnection(
service=service,
project_id=self.project_id,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries,
hook=self,
)
def get_client(self, project_id: str = PROVIDE_PROJECT_ID, location: str | None = None) -> Client:
"""
Get an authenticated BigQuery Client.
:param project_id: Project ID for the project which the client acts on behalf of.
:param location: Default location for jobs / datasets / tables.
"""
return Client(
client_info=CLIENT_INFO,
project=project_id,
location=location,
credentials=self.get_credentials(),
)
def get_uri(self) -> str:
"""Override from ``DbApiHook`` for ``get_sqlalchemy_engine()``."""
return f"bigquery://{self.project_id}"
def get_sqlalchemy_engine(self, engine_kwargs: dict | None = None):
"""
Create an SQLAlchemy engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
"""
if engine_kwargs is None:
engine_kwargs = {}
credentials_path = get_field(self.extras, "key_path")
if credentials_path:
return create_engine(self.get_uri(), credentials_path=credentials_path, **engine_kwargs)
keyfile_dict = get_field(self.extras, "keyfile_dict")
if keyfile_dict:
keyfile_content = keyfile_dict if isinstance(keyfile_dict, dict) else json.loads(keyfile_dict)
return create_engine(self.get_uri(), credentials_info=keyfile_content, **engine_kwargs)
try:
# 1. If the environment variable GOOGLE_APPLICATION_CREDENTIALS is set
# ADC uses the service account key or configuration file that the variable points to.
# 2. If the environment variable GOOGLE_APPLICATION_CREDENTIALS isn't set
# ADC uses the service account that is attached to the resource that is running your code.
return create_engine(self.get_uri(), **engine_kwargs)
except Exception as e:
self.log.error(e)
raise AirflowException(
"For now, we only support instantiating SQLAlchemy engine by"
" using ADC or extra fields `key_path` and `keyfile_dict`."
)
def get_records(self, sql, parameters=None):
if self.location is None:
raise AirflowException("Need to specify 'location' to use BigQueryHook.get_records()")
return super().get_records(sql, parameters=parameters)
@staticmethod
def _resolve_table_reference(
table_resource: dict[str, Any],
project_id: str = PROVIDE_PROJECT_ID,
dataset_id: str | None = None,
table_id: str | None = None,
) -> dict[str, Any]:
try:
# Check if tableReference is present and is valid
TableReference.from_api_repr(table_resource["tableReference"])
except KeyError:
# Something is wrong so we try to build the reference
table_resource["tableReference"] = table_resource.get("tableReference", {})
values = [("projectId", project_id), ("tableId", table_id), ("datasetId", dataset_id)]
for key, value in values:
# Check if value is already present if no use the provided one
resolved_value = table_resource["tableReference"].get(key, value)
if not resolved_value:
# If there's no value in tableReference and provided one is None raise error
raise AirflowException(
f"Table resource is missing proper `tableReference` and `{key}` is None"
)
table_resource["tableReference"][key] = resolved_value
return table_resource
def insert_rows(
self,
table: Any,
rows: Any,
target_fields: Any = None,
commit_every: Any = 1000,
replace: Any = False,
**kwargs,
) -> None:
"""
Insert rows.
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def _get_pandas_df(
self,
sql: str,
parameters: Iterable | Mapping[str, Any] | None = None,
dialect: str | None = None,
**kwargs,
) -> pd.DataFrame:
if dialect is None:
dialect = "legacy" if self.use_legacy_sql else "standard"
credentials, project_id = self.get_credentials_and_project_id()
return read_gbq(sql, project_id=project_id, dialect=dialect, credentials=credentials, **kwargs)
def _get_polars_df(self, sql, parameters=None, dialect=None, **kwargs) -> pl.DataFrame:
try:
import polars as pl
except ImportError:
raise AirflowOptionalProviderFeatureException(
"Polars is not installed. Please install it with `pip install polars`."
)
if dialect is None:
dialect = "legacy" if self.use_legacy_sql else "standard"
credentials, project_id = self.get_credentials_and_project_id()
pandas_df = read_gbq(sql, project_id=project_id, dialect=dialect, credentials=credentials, **kwargs)
return pl.from_pandas(pandas_df)
@overload
def get_df(
self, sql, parameters=None, dialect=None, *, df_type: Literal["pandas"] = "pandas", **kwargs
) -> pd.DataFrame: ...
@overload
def get_df(
self, sql, parameters=None, dialect=None, *, df_type: Literal["polars"], **kwargs
) -> pl.DataFrame: ...
def get_df(
self,
sql,
parameters=None,
dialect=None,
*,
df_type: Literal["pandas", "polars"] = "pandas",
**kwargs,
) -> pd.DataFrame | pl.DataFrame:
"""
Get a DataFrame for the BigQuery results.
The DbApiHook method must be overridden because Pandas doesn't support
PEP 249 connections, except for SQLite.
.. seealso::
https://github.com/pandas-dev/pandas/blob/055d008615272a1ceca9720dc365a2abd316f353/pandas/io/sql.py#L415
https://github.com/pandas-dev/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:param kwargs: (optional) passed into pandas_gbq.read_gbq method
"""
if df_type == "polars":
return self._get_polars_df(sql, parameters, dialect, **kwargs)
if df_type == "pandas":
return self._get_pandas_df(sql, parameters, dialect, **kwargs)
@deprecated(
planned_removal_date="November 30, 2025",
use_instead="airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_df",
category=AirflowProviderDeprecationWarning,
)
def get_pandas_df(self, sql, parameters=None, dialect=None, **kwargs):
return self._get_pandas_df(sql, parameters, dialect, **kwargs)
@GoogleBaseHook.fallback_to_default_project_id
def table_exists(self, dataset_id: str, table_id: str, project_id: str) -> bool:
"""
Check if a table exists in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:param dataset_id: The name of the dataset in which to look for the
table.
:param table_id: The name of the table to check the existence of.
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
self.get_client(project_id=project_id).get_table(table_reference)
return True
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def table_partition_exists(
self, dataset_id: str, table_id: str, partition_id: str, project_id: str
) -> bool:
"""
Check if a partition exists in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:param dataset_id: The name of the dataset in which to look for the
table.
:param table_id: The name of the table to check the existence of.
:param partition_id: The name of the partition to check the existence of.
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
return partition_id in self.get_client(project_id=project_id).list_partitions(table_reference)
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def create_table(
self,
dataset_id: str,
table_id: str,
table_resource: dict[str, Any] | Table | TableReference | TableListItem,
location: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
exists_ok: bool = True,
schema_fields: list | None = None,
retry: Retry = DEFAULT_RETRY,
timeout: float | None = None,
) -> Table:
"""
Create a new, empty table in the dataset.
:param project_id: Optional. The project to create the table into.
:param dataset_id: Required. The dataset to create the table into.
:param table_id: Required. The Name of the table to be created.
:param table_resource: Required. Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
If ``table`` is a reference, an empty table is created with the specified ID. The dataset that
the table belongs to must already exist.
:param schema_fields: Optional. If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
.. code-block:: python
schema_fields = [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"},
]
:param location: Optional. The location used for the operation.
:param exists_ok: Optional. If ``True``, ignore "already exists" errors when creating the table.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
"""
_table_resource: dict[str, Any] = {}
if isinstance(table_resource, Table):
_table_resource = Table.from_api_repr(table_resource) # type: ignore
if schema_fields:
_table_resource["schema"] = {"fields": schema_fields}
table_resource_final = {**table_resource, **_table_resource} # type: ignore
table_resource = self._resolve_table_reference(
table_resource=table_resource_final,
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
table = Table.from_api_repr(table_resource)
result = self.get_client(project_id=project_id, location=location).create_table(
table=table, exists_ok=exists_ok, retry=retry, timeout=timeout
)
get_hook_lineage_collector().add_output_asset(
context=self,
scheme="bigquery",
asset_kwargs={
"project_id": result.project,
"dataset_id": result.dataset_id,
"table_id": result.table_id,
},
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_dataset(
self,
dataset_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
dataset_reference: dict[str, Any] | None = None,
exists_ok: bool = True,
) -> dict[str, Any]:
"""
Create a new empty dataset.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
an empty a dataset. Don't need to provide, if projectId in dataset_reference.
:param dataset_id: The id of dataset. Don't need to provide, if datasetId in dataset_reference.
:param location: (Optional) The geographic location where the dataset should reside.
There is no default value but the dataset will be created in US if nothing is provided.
:param dataset_reference: Dataset reference that could be provided with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param exists_ok: If ``True``, ignore "already exists" errors when creating the dataset.
"""
dataset_reference = dataset_reference or {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
for param, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
specified_param = dataset_reference["datasetReference"].get(param)
if specified_param:
if value:
self.log.info(
"`%s` was provided in both `dataset_reference` and as `%s`. "
"Using value from `dataset_reference`",
param,
convert_camel_to_snake(param),
)
continue # use specified value
if not value:
raise ValueError(
f"Please specify `{param}` either in `dataset_reference` "
f"or by providing `{convert_camel_to_snake(param)}`",
)
# dataset_reference has no param but we can fallback to default value
self.log.info(
"%s was not specified in `dataset_reference`. Will use default value %s.", param, value
)
dataset_reference["datasetReference"][param] = value
location = location or self.location
project_id = project_id or self.project_id
if location:
dataset_reference["location"] = dataset_reference.get("location", location)
dataset: Dataset = Dataset.from_api_repr(dataset_reference)
self.log.info("Creating dataset: %s in project: %s ", dataset.dataset_id, dataset.project)
dataset_object = self.get_client(project_id=project_id, location=location).create_dataset(
dataset=dataset, exists_ok=exists_ok
)
self.log.info("Dataset created successfully.")
return dataset_object.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset_tables(
self,
dataset_id: str,
project_id: str = PROVIDE_PROJECT_ID,
max_results: int | None = None,
retry: Retry = DEFAULT_RETRY,
) -> list[dict[str, Any]]:
"""
Get the list of tables for a given dataset.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: the dataset ID of the requested dataset.
:param project_id: (Optional) the project of the requested dataset. If None,
self.project_id will be used.
:param max_results: (Optional) the maximum number of tables to return.
:param retry: How to retry the RPC.
:return: List of tables associated with the dataset.
"""
self.log.info("Start getting tables list from dataset: %s.%s", project_id, dataset_id)
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
retry=retry,
)
# Convert to a list (consumes all values)
return [t.reference.to_api_repr() for t in tables]
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
dataset_id: str,
project_id: str = PROVIDE_PROJECT_ID,
delete_contents: bool = False,
retry: Retry = DEFAULT_RETRY,
) -> None:
"""
Delete a dataset of Big query in your project.
:param project_id: The name of the project where we have the dataset.
:param dataset_id: The dataset to be delete.
:param delete_contents: If True, delete all the tables in the dataset.
If False and the dataset contains tables, the request will fail.
:param retry: How to retry the RPC.
"""
self.log.info("Deleting from project: %s Dataset:%s", project_id, dataset_id)
self.get_client(project_id=project_id).delete_dataset(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
delete_contents=delete_contents,
retry=retry,
not_found_ok=True,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_table(
self,
table_resource: dict[str, Any],
fields: list[str] | None = None,
dataset_id: str | None = None,
table_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> dict[str, Any]:
"""
Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, the field value will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
:param project_id: The project to create the table into.
:param dataset_id: The dataset to create the table into.
:param table_id: The Name of the table to be created.
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
The table has to contain ``tableReference`` or ``project_id``, ``dataset_id`` and ``table_id``
have to be provided.
:param fields: The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
"""
fields = fields or list(table_resource.keys())
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
table = Table.from_api_repr(table_resource)
self.log.info("Updating table: %s", table_resource["tableReference"])
table_object = self.get_client(project_id=project_id).update_table(table=table, fields=fields)
self.log.info("Table %s.%s.%s updated successfully", project_id, dataset_id, table_id)
get_hook_lineage_collector().add_output_asset(
context=self,
scheme="bigquery",
asset_kwargs={
"project_id": table_object.project,
"dataset_id": table_object.dataset_id,
"table_id": table_object.table_id,
},
)
return table_object.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def insert_all(
self,
project_id: str,
dataset_id: str,
table_id: str,
rows: list,
ignore_unknown_values: bool = False,
skip_invalid_rows: bool = False,
fail_on_error: bool = False,
) -> None:
"""
Stream data into BigQuery one record at a time without a load job.
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:param dataset_id: The name of the dataset where we have the table
:param table_id: The name of the table
:param rows: the rows to insert
.. code-block:: python
rows = [{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
"""
self.log.info("Inserting %s row(s) into table %s:%s.%s", len(rows), project_id, dataset_id, table_id)
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
bq_client = self.get_client(project_id=project_id)
table = bq_client.get_table(table_ref)
errors = bq_client.insert_rows(
table=table,
rows=rows,
ignore_unknown_values=ignore_unknown_values,
skip_invalid_rows=skip_invalid_rows,
)
if errors:
error_msg = f"{len(errors)} insert error(s) occurred. Details: {errors}"
self.log.error(error_msg)
if fail_on_error:
raise AirflowException(f"BigQuery job failed. Error was: {error_msg}")
else:
self.log.info("All row(s) inserted successfully: %s:%s.%s", project_id, dataset_id, table_id)
@GoogleBaseHook.fallback_to_default_project_id
def update_dataset(
self,
fields: Sequence[str],
dataset_resource: dict[str, Any],
dataset_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry = DEFAULT_RETRY,
) -> Dataset:
"""
Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param dataset_id: The id of the dataset.
:param fields: The properties of ``dataset`` to change (e.g. "friendly_name").
:param project_id: The Google Cloud Project ID
:param retry: How to retry the RPC.
"""
dataset_resource["datasetReference"] = dataset_resource.get("datasetReference", {})
for key, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
spec_value = dataset_resource["datasetReference"].get(key)
if value and not spec_value:
dataset_resource["datasetReference"][key] = value
self.log.info("Start updating dataset")
dataset = self.get_client(project_id=project_id).update_dataset(
dataset=Dataset.from_api_repr(dataset_resource),
fields=fields,
retry=retry,
)
self.log.info("Dataset successfully updated: %s", dataset)
return dataset
@GoogleBaseHook.fallback_to_default_project_id
def get_datasets_list(
self,
project_id: str = PROVIDE_PROJECT_ID,
include_all: bool = False,
filter_: str | None = None,
max_results: int | None = None,
page_token: str | None = None,
retry: Retry = DEFAULT_RETRY,
return_iterator: bool = False,
) -> list[DatasetListItem] | HTTPIterator:
"""
Get all BigQuery datasets in the current project.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you try to get all datasets
:param include_all: True if results include hidden datasets. Defaults to False.
:param filter_: An expression for filtering the results by label. For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
:param filter_: str
:param max_results: Maximum number of datasets to return.
:param max_results: int
:param page_token: Token representing a cursor into the datasets. If not passed,
the API will return the first page of datasets. The token marks the beginning of the
iterator to be returned and the value of the ``page_token`` can be accessed at
``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`.
:param page_token: str
:param retry: How to retry the RPC.
:param return_iterator: Instead of returning a list[Row], returns a HTTPIterator
which can be used to obtain the next_page_token property.
"""
iterator = self.get_client(project_id=project_id).list_datasets(
project=project_id,
include_all=include_all,
filter=filter_,
max_results=max_results,
page_token=page_token,
retry=retry,
)
# If iterator is requested, we cannot perform a list() on it to log the number
# of datasets because we will have started iteration
if return_iterator:
# The iterator returned by list_datasets() is a HTTPIterator but annotated
# as Iterator
return iterator # type: ignore
datasets_list = list(iterator)
self.log.info("Datasets List: %s", len(datasets_list))
return datasets_list
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset(self, dataset_id: str, project_id: str = PROVIDE_PROJECT_ID) -> Dataset:
"""
Fetch the dataset referenced by *dataset_id*.
:param dataset_id: The BigQuery Dataset ID
:param project_id: The Google Cloud Project ID
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
dataset = self.get_client(project_id=project_id).get_dataset(
dataset_ref=DatasetReference(project_id, dataset_id)
)
self.log.info("Dataset Resource: %s", dataset)
return dataset
@GoogleBaseHook.fallback_to_default_project_id
def run_grant_dataset_view_access(
self,
source_dataset: str,
view_dataset: str,
view_table: str,
view_project: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> dict[str, Any]:
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:param view_dataset: the dataset that the view is in
:param view_table: the table of the view
:param project_id: the project of the source dataset. If None,
self.project_id will be used.
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:return: the datasets resource of the source dataset.
"""
view_project = view_project or project_id
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={"projectId": view_project, "datasetId": view_dataset, "tableId": view_table},
)
dataset = self.get_dataset(project_id=project_id, dataset_id=source_dataset)
# Check to see if the view we want to add already exists.
if view_access not in dataset.access_entries:
self.log.info(
"Granting table %s:%s.%s authorized view access to %s:%s dataset.",
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
dataset.access_entries += [view_access]
dataset = self.update_dataset(
fields=["access"], dataset_resource=dataset.to_api_repr(), project_id=project_id
)
else:
self.log.info(
"Table %s:%s.%s already has authorized view access to %s:%s dataset.",
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
return dataset.to_api_repr()
    @GoogleBaseHook.fallback_to_default_project_id
    def run_table_upsert(
        self, dataset_id: str, table_resource: dict[str, Any], project_id: str = PROVIDE_PROJECT_ID
    ) -> dict[str, Any]:
        """
        Update a table if it exists, otherwise create a new one.

        Since BigQuery does not natively allow table upserts, this is not an
        atomic operation: the table list is read first and a concurrent
        create/delete can race with the decision made here.

        :param dataset_id: the dataset to upsert the table into.
        :param table_resource: a table resource. see
            https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
        :param project_id: the project to upsert the table into. If None,
            project will be self.project_id.
        :return: the API representation of the updated or created table.
        """
        table_id = table_resource["tableReference"]["tableId"]
        # Fill in any pieces of the tableReference that the caller omitted.
        table_resource = self._resolve_table_reference(
            table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
        )
        tables_list_resp = self.get_dataset_tables(dataset_id=dataset_id, project_id=project_id)
        if any(table["tableId"] == table_id for table in tables_list_resp):
            self.log.info("Table %s:%s.%s exists, updating.", project_id, dataset_id, table_id)
            # The resolved tableReference inside table_resource identifies the
            # target, so no explicit ids are passed here.
            table = self.update_table(table_resource=table_resource)
        else:
            self.log.info("Table %s:%s.%s does not exist. creating.", project_id, dataset_id, table_id)
            table = self.create_table(
                dataset_id=dataset_id, table_id=table_id, table_resource=table_resource, project_id=project_id
            ).to_api_repr()
        return table
@GoogleBaseHook.fallback_to_default_project_id
def delete_table(
self,
table_id: str,
not_found_ok: bool = True,
project_id: str = PROVIDE_PROJECT_ID,
) -> None:
"""
Delete an existing table from the dataset.
If the table does not exist, return an error unless *not_found_ok* is
set to True.
:param table_id: A dotted ``(<project>.|<project>:)<dataset>.<table>``
that indicates which table will be deleted.
:param not_found_ok: if True, then return success even if the
requested table does not exist.
:param project_id: the project used to perform the request
"""
self.get_client(project_id=project_id).delete_table(
table=table_id,
not_found_ok=not_found_ok,
)
self.log.info("Deleted table %s", table_id)
table_ref = TableReference.from_string(table_id, default_project=project_id)
get_hook_lineage_collector().add_input_asset(
context=self,
scheme="bigquery",
asset_kwargs={
"project_id": table_ref.project,
"dataset_id": table_ref.dataset_id,
"table_id": table_ref.table_id,
},
)
    @GoogleBaseHook.fallback_to_default_project_id
    def list_rows(
        self,
        dataset_id: str,
        table_id: str,
        max_results: int | None = None,
        selected_fields: list[str] | str | None = None,
        page_token: str | None = None,
        start_index: int | None = None,
        project_id: str = PROVIDE_PROJECT_ID,
        location: str | None = None,
        retry: Retry = DEFAULT_RETRY,
        return_iterator: bool = False,
    ) -> list[Row] | RowIterator:
        """
        List rows in a table.

        See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list

        :param dataset_id: the dataset ID of the requested table.
        :param table_id: the table ID of the requested table.
        :param max_results: the maximum results to return.
        :param selected_fields: List of fields to return (comma-separated). If
            unspecified, all fields are returned.
        :param page_token: page token, returned from a previous call,
            identifying the result set.
        :param start_index: zero based index of the starting row to read.
        :param project_id: Project ID for the project which the client acts on behalf of.
        :param location: Default location for job.
        :param retry: How to retry the RPC.
        :param return_iterator: Instead of returning a list[Row], returns a RowIterator
            which can be used to obtain the next_page_token property.
        :return: list of rows, or the raw RowIterator when *return_iterator* is True
        """
        location = location or self.location
        # Accept a comma-separated string as a convenience alias for a list.
        if isinstance(selected_fields, str):
            selected_fields = selected_fields.split(",")
        if selected_fields:
            # Field type is left blank; presumably only the field *names* are
            # needed for column projection here — TODO confirm against client docs.
            selected_fields_sequence = [SchemaField(n, "") for n in selected_fields]
        else:
            selected_fields_sequence = None
        # Build a full tableReference dict from our defaults.
        table = self._resolve_table_reference(
            table_resource={},
            project_id=project_id,
            dataset_id=dataset_id,
            table_id=table_id,
        )
        iterator = self.get_client(project_id=project_id, location=location).list_rows(
            table=Table.from_api_repr(table),
            selected_fields=selected_fields_sequence,
            max_results=max_results,
            page_token=page_token,
            start_index=start_index,
            retry=retry,
        )
        if return_iterator:
            # The iterator exposes paging state (e.g. next_page_token).
            return iterator
        return list(iterator)
@GoogleBaseHook.fallback_to_default_project_id
def get_schema(self, dataset_id: str, table_id: str, project_id: str = PROVIDE_PROJECT_ID) -> dict:
"""
Get the schema for a given dataset and table.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:param project_id: the optional project ID of the requested table.
If not provided, the connector's configured project will be used.
:return: a table schema
"""
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
table = self.get_client(project_id=project_id).get_table(table_ref)
return {"fields": [s.to_api_repr() for s in table.schema]}
    @GoogleBaseHook.fallback_to_default_project_id
    def update_table_schema(
        self,
        schema_fields_updates: list[dict[str, Any]],
        include_policy_tags: bool,
        dataset_id: str,
        table_id: str,
        project_id: str = PROVIDE_PROJECT_ID,
    ) -> dict[str, Any]:
        """
        Update fields within a schema for a given dataset and table.

        Note that some fields in schemas are immutable; trying to change them
        will cause an exception.

        If a new field is included, it will be inserted, which requires all
        required fields to be set.

        .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema

        :param include_policy_tags: If set to True policy tags will be included in
            the update request which requires special permissions even if unchanged
            see https://cloud.google.com/bigquery/docs/column-level-security#roles
        :param dataset_id: the dataset ID of the requested table to be updated
        :param table_id: the table ID of the table to be updated
        :param schema_fields_updates: a partial schema resource. See
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema

        .. code-block:: python

            schema_fields_updates = [
                {"name": "emp_name", "description": "Some New Description"},
                {"name": "salary", "description": "Some New Description"},
                {
                    "name": "departments",
                    "fields": [
                        {"name": "name", "description": "Some New Description"},
                        {"name": "type", "description": "Some New Description"},
                    ],
                },
            ]

        :param project_id: The name of the project where we want to update the table.
        :return: the API representation of the updated table.
        """
        # Recursively merge the partial updates into the current schema,
        # keyed by field name; nested RECORD fields are merged field-by-field.
        def _build_new_schema(
            current_schema: list[dict[str, Any]], schema_fields_updates: list[dict[str, Any]]
        ) -> list[dict[str, Any]]:
            # Turn schema_field_updates into a dict keyed on field names
            # (deepcopy so the caller's inputs are never mutated).
            schema_fields_updates_dict = {field["name"]: field for field in deepcopy(schema_fields_updates)}
            # Create a new dict for storing the new schema, initiated based on the current_schema
            # as of Python 3.6, dicts retain order.
            new_schema = {field["name"]: field for field in deepcopy(current_schema)}
            # Each item in schema_fields_updates contains a potential patch
            # to a schema field, iterate over them
            for field_name, patched_value in schema_fields_updates_dict.items():
                # If this field already exists, update it
                if field_name in new_schema:
                    # If this field is of type RECORD and has a fields key we need to patch it recursively
                    if "fields" in patched_value:
                        patched_value["fields"] = _build_new_schema(
                            new_schema[field_name]["fields"], patched_value["fields"]
                        )
                    # Update the new_schema with the patched value
                    new_schema[field_name].update(patched_value)
                # This is a new field, just include the whole configuration for it
                else:
                    new_schema[field_name] = patched_value
            return list(new_schema.values())
        # Strip policyTags (recursively) so the update request does not require
        # column-level-security permissions when tags are unchanged.
        def _remove_policy_tags(schema: list[dict[str, Any]]):
            for field in schema:
                if "policyTags" in field:
                    del field["policyTags"]
                if "fields" in field:
                    _remove_policy_tags(field["fields"])
        current_table_schema = self.get_schema(
            dataset_id=dataset_id, table_id=table_id, project_id=project_id
        )["fields"]
        new_schema = _build_new_schema(current_table_schema, schema_fields_updates)
        if not include_policy_tags:
            _remove_policy_tags(new_schema)
        # The full merged schema is sent; only the "schema" field is patched.
        table = self.update_table(
            table_resource={"schema": {"fields": new_schema}},
            fields=["schema"],
            project_id=project_id,
            dataset_id=dataset_id,
            table_id=table_id,
        )
        return table
@GoogleBaseHook.fallback_to_default_project_id
def poll_job_complete(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
retry: Retry = DEFAULT_RETRY,
) -> bool:
"""
Check if jobs have completed.
:param job_id: id of the job.
:param project_id: Google Cloud Project where the job is running
:param location: location the job is running
:param retry: How to retry the RPC.
"""
location = location or self.location
job = self.get_client(project_id=project_id, location=location).get_job(job_id=job_id)
return job.done(retry=retry)
    @GoogleBaseHook.fallback_to_default_project_id
    def cancel_job(
        self,
        job_id: str,
        project_id: str = PROVIDE_PROJECT_ID,
        location: str | None = None,
    ) -> None:
        """
        Cancel a job and wait for cancellation to complete.

        After requesting cancellation, polls the job up to 12 times at 5-second
        intervals (~60 s total) and gives up with a log message on timeout.

        :param job_id: id of the job.
        :param project_id: Google Cloud Project where the job is running
        :param location: location the job is running
        """
        project_id = project_id or self.project_id
        location = location or self.location
        # If the job already finished there is nothing to cancel.
        if self.poll_job_complete(job_id=job_id, project_id=project_id, location=location):
            self.log.info("No running BigQuery jobs to cancel.")
            return
        self.log.info("Attempting to cancel job : %s, %s", project_id, job_id)
        self.get_client(location=location, project_id=project_id).cancel_job(job_id=job_id)
        # Wait for all the calls to cancel to finish
        max_polling_attempts = 12
        polling_attempts = 0
        job_complete = False
        while polling_attempts < max_polling_attempts and not job_complete:
            polling_attempts += 1
            job_complete = self.poll_job_complete(job_id=job_id, project_id=project_id, location=location)
            if job_complete:
                self.log.info("Job successfully canceled: %s, %s", project_id, job_id)
            elif polling_attempts == max_polling_attempts:
                # Last attempt and still not done: stop waiting; the job may
                # still finish (or cancel) on the server side.
                self.log.info(
                    "Stopping polling due to timeout. Job %s, %s "
                    "has not completed cancel and may or may not finish.",
                    project_id,
                    job_id,
                )
            else:
                self.log.info("Waiting for canceled job %s, %s to finish.", project_id, job_id)
                time.sleep(5)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.refresh_credentials_retry()
def get_job(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
) -> BigQueryJob | UnknownJob:
"""
Retrieve a BigQuery job.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
characters.
:param project_id: Google Cloud Project where the job is running.
:param location: Location where the job is running.
"""
client = self.get_client(project_id=project_id, location=location)
job = client.get_job(job_id=job_id, project=project_id, location=location)
return job
@staticmethod
def _custom_job_id(configuration: dict[str, Any]) -> str:
hash_base = json.dumps(configuration, sort_keys=True)
uniqueness_suffix = md5(hash_base.encode()).hexdigest()
microseconds_from_epoch = int(
(datetime.now() - datetime.fromtimestamp(0)) / timedelta(microseconds=1)
)
return f"airflow_{microseconds_from_epoch}_{uniqueness_suffix}"
    @GoogleBaseHook.fallback_to_default_project_id
    def insert_job(
        self,
        configuration: dict,
        job_id: str | None = None,
        project_id: str = PROVIDE_PROJECT_ID,
        location: str | None = None,
        nowait: bool = False,
        retry: Retry = DEFAULT_RETRY,
        timeout: float | None = None,
    ) -> BigQueryJob:
        """
        Execute a BigQuery job and wait for it to complete.

        .. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs

        :param configuration: The configuration parameter maps directly to
            BigQuery's configuration field in the job object. See
            https://cloud.google.com/bigquery/docs/reference/v2/jobs for
            details.
        :param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
            numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
            characters. If not provided then uuid will be generated.
        :param project_id: Google Cloud Project where the job is running.
        :param location: Location the job is running.
        :param nowait: Whether to insert job without waiting for the result.
        :param retry: How to retry the RPC.
        :param timeout: The number of seconds to wait for the underlying HTTP transport
            before using ``retry``.
        :return: The inserted BigQuery job (completed unless ``nowait`` is True).
        """
        location = location or self.location
        job_id = job_id or self._custom_job_id(configuration)
        client = self.get_client(project_id=project_id, location=location)
        job_data = {
            "configuration": configuration,
            "jobReference": {"jobId": job_id, "projectId": project_id, "location": location},
        }
        # The job class is selected by which configuration key is present
        # (e.g. "query" -> QueryJob, "load" -> LoadJob).
        supported_jobs: dict[str, type[CopyJob] | type[QueryJob] | type[LoadJob] | type[ExtractJob]] = {
            LoadJob._JOB_TYPE: LoadJob,
            CopyJob._JOB_TYPE: CopyJob,
            ExtractJob._JOB_TYPE: ExtractJob,
            QueryJob._JOB_TYPE: QueryJob,
        }
        job: type[CopyJob] | type[QueryJob] | type[LoadJob] | type[ExtractJob] | None = None
        for job_type, job_object in supported_jobs.items():
            if job_type in configuration:
                job = job_object
                break
        if not job:
            raise AirflowException(f"Unknown job type. Supported types: {supported_jobs.keys()}")
        job_api_repr = job.from_api_repr(job_data, client)
        self.log.info("Inserting job %s", job_api_repr.job_id)
        if nowait:
            # Initiate the job and don't wait for it to complete.
            # NOTE: relies on the client library's private _begin() API.
            job_api_repr._begin()
        else:
            # Start the job and wait for it to complete and get the result.
            job_api_repr.result(timeout=timeout, retry=retry)
        return job_api_repr
def generate_job_id(
self,
job_id: str | None,
dag_id: str,
task_id: str,
logical_date: datetime | None,
configuration: dict,
run_after: pendulum.DateTime | datetime | None = None,
force_rerun: bool = False,
) -> str:
if force_rerun:
hash_base = str(uuid.uuid4())
else:
hash_base = json.dumps(configuration, sort_keys=True)
uniqueness_suffix = md5(hash_base.encode()).hexdigest()
if job_id:
return f"{job_id}_{uniqueness_suffix}"
if logical_date is not None:
if AIRFLOW_V_3_0_PLUS:
warnings.warn(
"The 'logical_date' parameter is deprecated. Please use 'run_after' instead.",
AirflowProviderDeprecationWarning,
stacklevel=1,
)
job_id_timestamp = logical_date
elif run_after is not None:
job_id_timestamp = run_after
else:
job_id_timestamp = pendulum.now("UTC")
job_id = f"airflow_{dag_id}_{task_id}_{job_id_timestamp.isoformat()}_{uniqueness_suffix}"
return re.sub(r"[:\-+.]", "_", job_id)
def get_run_after_or_logical_date(self, context: Context) -> pendulum.DateTime | datetime | None:
dag_run = context.get("dag_run")
if not dag_run:
return pendulum.now("UTC")
if AIRFLOW_V_3_0_PLUS:
return dag_run.start_date
return dag_run.start_date if dag_run.run_type == DagRunType.SCHEDULED else context.get("logical_date")
def split_tablename(
self, table_input: str, default_project_id: str, var_name: str | None = None
) -> tuple[str, str, str]:
if "." not in table_input:
raise ValueError(f"Expected table name in the format of <dataset>.<table>. Got: {table_input}")
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
return f"Format exception for {var_name}: "
if table_input.count(".") + table_input.count(":") > 3:
raise ValueError(f"{var_print(var_name)}Use either : or . to specify project got {table_input}")
cmpt = table_input.rsplit(":", 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(":") <= 1:
if cmpt[-1].count(".") != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise ValueError(
f"{var_print(var_name)}Expect format of (<project:)<dataset>.<table>, got {table_input}"
)
cmpt = rest.split(".")
if len(cmpt) == 3:
if project_id:
raise ValueError(f"{var_print(var_name)}Use either : or . to specify project")
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise ValueError(
f"{var_print(var_name)}Expect format of (<project.|<project:)<dataset>.<table>, "
f"got {table_input}"
)
if project_id is None:
if var_name is not None:
self.log.info(
'Project is not included in %s: %s; using project "%s"',
var_name,
table_input,
default_project_id,
)
project_id = default_project_id
return project_id, dataset_id, table_id
@GoogleBaseHook.fallback_to_default_project_id
def get_query_results(
self,
job_id: str,
location: str,
max_results: int | None = None,
selected_fields: list[str] | str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry = DEFAULT_RETRY,
job_retry: Retry = DEFAULT_JOB_RETRY,
) -> list[dict[str, Any]]:
"""
Get query results given a job_id.
:param job_id: The ID of the job.
The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
dashes (-). The maximum length is 1,024 characters.
:param location: The location used for the operation.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param max_results: The maximum number of records (rows) to be fetched
from the table.
:param project_id: Google Cloud Project where the job ran.
:param retry: How to retry the RPC.
:param job_retry: How to retry failed jobs.
:return: List of rows where columns are filtered by selected fields, when given
:raises: AirflowException
"""
if isinstance(selected_fields, str):
selected_fields = selected_fields.split(",")
job = self.get_job(job_id=job_id, project_id=project_id, location=location)
if not isinstance(job, QueryJob):
raise AirflowException(f"Job '{job_id}' is not a query job")
if job.state != "DONE":
raise AirflowException(f"Job '{job_id}' is not in DONE state")
rows = [dict(row) for row in job.result(max_results=max_results, retry=retry, job_retry=job_retry)]
return [{k: row[k] for k in row if k in selected_fields} for row in rows] if selected_fields else rows
@property
def scopes(self) -> Sequence[str]:
"""
Return OAuth 2.0 scopes.
:return: Returns the scope defined in impersonation_scopes, the connection configuration, or the default scope
"""
scope_value: str | None
if self.impersonation_chain and self.impersonation_scopes:
scope_value = ",".join(self.impersonation_scopes)
else:
scope_value = self._get_field("scope", None)
return _get_scopes(scope_value)
| BigQueryHook |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 665851,
"end": 667401
} | class ____(sgqlc.types.Type):
"""A file in a gist."""
__schema__ = github_schema
__field_names__ = ("encoded_name", "encoding", "extension", "is_image", "is_truncated", "language", "name", "size", "text")
encoded_name = sgqlc.types.Field(String, graphql_name="encodedName")
"""The file name encoded to remove characters that are invalid in URL
paths.
"""
encoding = sgqlc.types.Field(String, graphql_name="encoding")
"""The gist file encoding."""
extension = sgqlc.types.Field(String, graphql_name="extension")
"""The file extension from the file name."""
is_image = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isImage")
"""Indicates if this file is an image."""
is_truncated = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isTruncated")
"""Whether the file's contents were truncated."""
language = sgqlc.types.Field("Language", graphql_name="language")
"""The programming language this file is written in."""
name = sgqlc.types.Field(String, graphql_name="name")
"""The gist file name."""
size = sgqlc.types.Field(Int, graphql_name="size")
"""The gist file size in bytes."""
text = sgqlc.types.Field(
String, graphql_name="text", args=sgqlc.types.ArgDict((("truncate", sgqlc.types.Arg(Int, graphql_name="truncate", default=None)),))
)
"""UTF8 text data or null if the file is binary
Arguments:
* `truncate` (`Int`): Optionally truncate the returned file to
this length.
"""
| GistFile |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams5.py | {
"start": 203,
"end": 305
} | class ____[R: int | str]:
...
# This should generate an error because 'dummy' is not declared.
| ClassB |
python | automl__auto-sklearn | autosklearn/pipeline/components/classification/qda.py | {
"start": 455,
"end": 2847
} | class ____(AutoSklearnClassificationAlgorithm):
def __init__(self, reg_param, random_state=None):
self.reg_param = float(reg_param)
self.estimator = None
def fit(self, X, Y):
import sklearn.discriminant_analysis
estimator = sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis(
reg_param=self.reg_param
)
if len(Y.shape) == 2 and Y.shape[1] > 1:
import sklearn.multiclass
self.estimator = sklearn.multiclass.OneVsRestClassifier(estimator, n_jobs=1)
else:
self.estimator = estimator
self.estimator.fit(X, Y)
if len(Y.shape) == 2 and Y.shape[1] > 1:
problems = []
for est in self.estimator.estimators_:
problem = np.any(np.any([np.any(s <= 0.0) for s in est.scalings_]))
problems.append(problem)
problem = np.any(problems)
else:
problem = np.any(
np.any([np.any(s <= 0.0) for s in self.estimator.scalings_])
)
if problem:
raise ValueError(
"Numerical problems in QDA. QDA.scalings_ " "contains values <= 0.0"
)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
df = self.estimator.predict_proba(X)
return softmax(df)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "QDA",
"name": "Quadratic Discriminant Analysis",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
reg_param = UniformFloatHyperparameter("reg_param", 0.0, 1.0, default_value=0.0)
cs = ConfigurationSpace()
cs.add_hyperparameter(reg_param)
return cs
| QDA |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 138478,
"end": 141957
} | class ____(Response):
"""
Response of events.get_task_plots endpoint.
:param plots: Plots list
:type plots: Sequence[dict]
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query. In case there
are more than 10000 results it is set to 10000
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_task_plots"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"plots": {
"description": "Plots list",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query. In case there are more than 10000 results it is set to 10000",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
plots: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskPlotsResponse, self).__init__(**kwargs)
self.plots = plots
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("plots")
def plots(self) -> Optional[List[dict]]:
return self._property_plots
@plots.setter
def plots(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_plots = None
return
self.assert_isinstance(value, "plots", (list, tuple))
self.assert_isinstance(value, "plots", (dict,), is_array=True)
self._property_plots = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetTaskPlotsResponse |
python | numba__llvmlite | llvmlite/tests/test_ir.py | {
"start": 31012,
"end": 32499
} | class ____(TestBase):
def test_attributes(self):
func = self.function()
block = ir.Block(parent=func, name='start')
self.assertIs(block.parent, func)
self.assertFalse(block.is_terminated)
def test_descr(self):
block = self.block(name='my_block')
self.assertEqual(self.descr(block), "my_block:\n")
block.instructions.extend(['a', 'b'])
self.assertEqual(self.descr(block), "my_block:\n a\n b\n")
def test_replace(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
c = builder.add(a, b, 'c')
d = builder.sub(a, b, 'd')
builder.mul(d, b, 'e')
f = ir.Instruction(block, a.type, 'sdiv', (c, b), 'f')
self.check_block(block, """\
my_block:
%"c" = add i32 %".1", %".2"
%"d" = sub i32 %".1", %".2"
%"e" = mul i32 %"d", %".2"
""")
block.replace(d, f)
self.check_block(block, """\
my_block:
%"c" = add i32 %".1", %".2"
%"f" = sdiv i32 %"c", %".2"
%"e" = mul i32 %"f", %".2"
""")
def test_repr(self):
"""
Blocks should have a useful repr()
"""
func = self.function()
block = ir.Block(parent=func, name='start')
self.assertEqual(repr(block), "<ir.Block 'start' of type 'label'>")
| TestBlock |
python | django__django | django/db/models/expressions.py | {
"start": 24062,
"end": 27280
} | class ____(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
# We avoid using super() here for reasons given in
# Expression._resolve_output_field()
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs._output_field_or_none),
type(self.rhs._output_field_or_none),
)
if combined_type is None:
raise FieldError(
f"Cannot infer type of {self.connector!r} expression involving these "
f"types: {self.lhs.output_field.__class__.__name__}, "
f"{self.rhs.output_field.__class__.__name__}. You must set "
f"output_field."
)
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
resolved = super().resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = resolved.lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = resolved.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
resolved.lhs, resolved.connector, resolved.rhs
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(resolved.lhs, resolved.rhs)
return resolved
@cached_property
def allowed_default(self):
return self.lhs.allowed_default and self.rhs.allowed_default
| CombinedExpression |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/retrieval_qa/base.py | {
"start": 9858,
"end": 11859
} | class ____(BaseRetrievalQA):
"""Chain for question-answering against a vector database."""
vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")
"""Vector Database to connect to."""
k: int = 4
"""Number of documents to query for."""
search_type: str = "similarity"
"""Search type to use over vectorstore. `similarity` or `mmr`."""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
@model_validator(mode="before")
@classmethod
def validate_search_type(cls, values: dict) -> Any:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "mmr"):
msg = f"search_type of {search_type} not allowed."
raise ValueError(msg)
return values
@override
def _get_docs(
self,
question: str,
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
"""Get docs."""
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
elif self.search_type == "mmr":
docs = self.vectorstore.max_marginal_relevance_search(
question,
k=self.k,
**self.search_kwargs,
)
else:
msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs
async def _aget_docs(
self,
question: str,
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
"""Get docs."""
msg = "VectorDBQA does not support async"
raise NotImplementedError(msg)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "vector_db_qa"
| VectorDBQA |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 20801,
"end": 21841
} | class ____(BaseDeclaration):
"""Declarations to be called once the model object has been generated."""
FACTORY_BUILDER_PHASE = enums.BuilderPhase.POST_INSTANTIATION
def evaluate_post(self, instance, step, overrides):
context = self.unroll_context(instance, step, overrides)
postgen_context = PostGenerationContext(
value_provided=bool('' in context),
value=context.get(''),
extra={k: v for k, v in context.items() if k != ''},
)
return self.call(instance, step, postgen_context)
def call(self, instance, step, context): # pragma: no cover
"""Call this hook; no return value is expected.
Args:
instance (object): the newly generated object
step (bool): whether the object was 'built' or 'created'
context: a declarations.PostGenerationContext containing values
extracted from the containing factory's declaration
"""
raise NotImplementedError()
| PostGenerationDeclaration |
python | aio-libs__aiohttp | aiohttp/http_parser.py | {
"start": 1883,
"end": 2140
} | class ____(NamedTuple):
method: str
path: str
version: HttpVersion
headers: CIMultiDictProxy[str]
raw_headers: RawHeaders
should_close: bool
compression: str | None
upgrade: bool
chunked: bool
url: URL
| RawRequestMessage |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/exceptions.py | {
"start": 161,
"end": 2532
} | class ____:
class BaseBulkException(AirbyteTracedException):
"""Base BULK Job Exception"""
failure_type: FailureType = FailureType.config_error
def __init__(self, message: str, **kwargs) -> None:
super().__init__(internal_message=message, failure_type=self.failure_type, **kwargs)
class BulkJobError(BaseBulkException):
"""Raised when there are BULK Job Errors in response"""
class BulkJobNonHandableError(BaseBulkException):
"""Raised when there are non-actionable BULK Job Errors in response"""
failure_type: FailureType = FailureType.system_error
class BulkJobBadResponse(BaseBulkException):
"""Raised when the requests.Response object could not be parsed"""
class BulkJobResultUrlError(BaseBulkException):
"""Raised when BULK Job has ACCESS_DENIED status"""
class BulkRecordProduceError(BaseBulkException):
"""Raised when there are error producing records from BULK Job result"""
class BulkJobFailed(BaseBulkException):
"""Raised when BULK Job has FAILED status"""
class BulkJobCanceled(BaseBulkException):
"""Raised when BULK Job has CANCELED status"""
failure_type: FailureType = FailureType.system_error
class BulkJobTimout(BaseBulkException):
"""Raised when BULK Job has TIMEOUT status"""
class BulkJobAccessDenied(BaseBulkException):
"""Raised when BULK Job has ACCESS_DENIED status"""
class BulkJobCreationFailedConcurrentError(BaseBulkException):
"""Raised when an attempt to create a job as failed because of concurrency limits."""
failure_type: FailureType = FailureType.transient_error
class BulkJobCheckpointCollisionError(BaseBulkException):
"""Raised when an attempt to create a job using the `checkpointed cursor` value goes into inf.loop."""
failure_type: FailureType = FailureType.transient_error
class BulkJobRedirectToOtherShopError(BaseBulkException):
"""Raised when the response contains another shop name"""
failure_type: FailureType = FailureType.transient_error
class BulkJobConcurrentError(BaseBulkException):
"""Raised when failing the job after hitting too many BulkJobCreationFailedConcurrentError."""
failure_type: FailureType = FailureType.transient_error
| ShopifyBulkExceptions |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/activation.py | {
"start": 212,
"end": 1360
} | class ____(torch.nn.ReLU):
r"""Applies the element-wise function:
:math:`\text{ReLU6}(x) = \min(\max(x_0, x), q(6))`, where :math:`x_0` is the
zero_point, and :math:`q(6)` is the quantized representation of number 6.
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.quantized.ReLU6()
>>> input = torch.randn(2)
>>> # xdoctest: +SKIP
>>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super().__init__(inplace)
self.inplace = inplace
def forward(self, input):
return torch.ops.quantized.relu6(input, self.inplace)
def _get_name(self):
return "QuantizedReLU6"
@staticmethod
def from_float(mod, use_precomputed_fake_quant=False):
return ReLU6(mod.inplace)
| ReLU6 |
python | spyder-ide__spyder | spyder/plugins/editor/utils/editor.py | {
"start": 5843,
"end": 23193
} | class ____(object):
"""
Text helper helps you manipulate the content of CodeEditor and extends the
Qt text api for an easier usage.
FIXME: Some of this methods are already implemented in CodeEditor, move
and unify redundant methods.
"""
@property
def _editor(self):
try:
return self._editor_ref()
except TypeError:
return self._editor_ref
def __init__(self, editor):
""":param editor: The editor to work on."""
try:
self._editor_ref = weakref.ref(editor)
except TypeError:
self._editor_ref = editor
def goto_line(self, line, column=0, end_column=0, move=True, word=''):
"""
Moves the text cursor to the specified position.
:param line: Number of the line to go to (0 based)
:param column: Optional column number. Default is 0 (start of line).
:param move: True to move the cursor. False will return the cursor
without setting it on the editor.
:param word: Highlight the word, when moving to the line.
:return: The new text cursor
:rtype: QtGui.QTextCursor
"""
line = min(line, self.line_count())
text_cursor = self._move_cursor_to(line)
if column:
text_cursor.movePosition(
QTextCursor.MoveOperation.Right,
QTextCursor.MoveMode.MoveAnchor,
column
)
if end_column:
text_cursor.movePosition(
QTextCursor.MoveOperation.Right,
QTextCursor.MoveMode.KeepAnchor,
end_column
)
if move:
block = text_cursor.block()
self.unfold_if_colapsed(text_cursor)
self._editor.setTextCursor(text_cursor)
if self._editor.isVisible():
self._editor.centerCursor()
else:
self._editor.focus_in.connect(
self._editor.center_cursor_on_next_focus)
if word and str(word) in str(block.text()):
self._editor.find(word, QTextDocument.FindCaseSensitively)
return text_cursor
def unfold_if_colapsed(self, cursor):
"""Unfold parent fold trigger if the block is collapsed.
:param block: Block to unfold.
"""
block = cursor.block()
try:
folding_panel = self._editor.panels.get('FoldingPanel')
except KeyError:
pass
else:
if block.isVisible():
return
fold_start_line = block.blockNumber()
# Find the innermost code folding region for the current position
enclosing_regions = sorted(list(
folding_panel.current_tree[fold_start_line]))
folding_status = folding_panel.folding_status
if len(enclosing_regions) > 0:
for region in enclosing_regions:
fold_start_line = region.begin
block = self._editor.document().findBlockByNumber(
fold_start_line)
if fold_start_line in folding_status:
fold_status = folding_status[fold_start_line]
if fold_status:
folding_panel.toggle_fold_trigger(block)
self._editor.setTextCursor(cursor)
if self._editor.isVisible():
self._editor.centerCursor()
def selected_text(self):
"""Returns the selected text."""
return self._editor.textCursor().selectedText()
def word_under_cursor(self, select_whole_word=False, text_cursor=None):
"""
Gets the word under cursor using the separators defined by
:attr:`spyder.plugins.editor.widgets.codeeditor.CodeEditor.word_separators`.
FIXME: This is not working because CodeEditor have no attribute
word_separators
.. note: Instead of returning the word string, this function returns
a QTextCursor, that way you may get more information than just the
string. To get the word, just call ``selectedText`` on the returned
value.
:param select_whole_word: If set to true the whole word is selected,
else the selection stops at the cursor position.
:param text_cursor: Optional custom text cursor (e.g. from a
QTextDocument clone)
:returns: The QTextCursor that contains the selected word.
"""
editor = self._editor
if not text_cursor:
text_cursor = editor.textCursor()
word_separators = editor.word_separators
end_pos = start_pos = text_cursor.position()
# select char by char until we are at the original cursor position.
while not text_cursor.atStart():
text_cursor.movePosition(
QTextCursor.MoveOperation.Left,
QTextCursor.MoveMode.KeepAnchor,
1
)
try:
char = text_cursor.selectedText()[0]
word_separators = editor.word_separators
selected_txt = text_cursor.selectedText()
if (selected_txt in word_separators and
(selected_txt != "n" and selected_txt != "t") or
char.isspace()):
break # start boundary found
except IndexError:
break # nothing selectable
start_pos = text_cursor.position()
text_cursor.setPosition(start_pos)
if select_whole_word:
# select the resot of the word
text_cursor.setPosition(end_pos)
while not text_cursor.atEnd():
text_cursor.movePosition(
QTextCursor.MoveOperation.Right,
QTextCursor.MoveMode.KeepAnchor,
1
)
char = text_cursor.selectedText()[0]
selected_txt = text_cursor.selectedText()
if (selected_txt in word_separators and
(selected_txt != "n" and selected_txt != "t") or
char.isspace()):
break # end boundary found
end_pos = text_cursor.position()
text_cursor.setPosition(end_pos)
# now that we habe the boundaries, we can select the text
text_cursor.setPosition(start_pos)
text_cursor.setPosition(end_pos, QTextCursor.MoveMode.KeepAnchor)
return text_cursor
def word_under_mouse_cursor(self):
"""
Selects the word under the **mouse** cursor.
:return: A QTextCursor with the word under mouse cursor selected.
"""
editor = self._editor
text_cursor = editor.cursorForPosition(editor._last_mouse_pos)
text_cursor = self.word_under_cursor(True, text_cursor)
return text_cursor
def cursor_position(self):
"""
Returns the QTextCursor position. The position is a tuple made up of
the line number (0 based) and the column number (0 based).
:return: tuple(line, column)
"""
return (self._editor.textCursor().blockNumber(),
self._editor.textCursor().columnNumber())
def current_line_nbr(self):
"""
Returns the text cursor's line number.
:return: Line number
"""
return self.cursor_position()[0]
def current_column_nbr(self):
"""
Returns the text cursor's column number.
:return: Column number
"""
return self.cursor_position()[1]
def line_count(self):
"""
Returns the line count of the specified editor.
:return: number of lines in the document.
"""
return self._editor.document().blockCount()
def line_text(self, line_nbr):
"""
Gets the text of the specified line.
:param line_nbr: The line number of the text to get
:return: Entire line's text
:rtype: str
"""
doc = self._editor.document()
block = doc.findBlockByNumber(line_nbr)
return block.text()
def previous_line_text(self):
"""
Gets the previous line text (relative to the current cursor pos).
:return: previous line text (str)
"""
if self.current_line_nbr():
return self.line_text(self.current_line_nbr() - 1)
return ''
def current_line_text(self):
"""
Returns the text of the current line.
:return: Text of the current line
"""
return self.line_text(self.current_line_nbr())
def set_line_text(self, line_nbr, new_text):
"""
Replace an entire line with ``new_text``.
:param line_nbr: line number of the line to change.
:param new_text: The replacement text.
"""
editor = self._editor
text_cursor = self._move_cursor_to(line_nbr)
text_cursor.select(QTextCursor.SelectionType.LineUnderCursor)
text_cursor.insertText(new_text)
editor.setTextCursor(text_cursor)
def remove_last_line(self):
"""Removes the last line of the document."""
editor = self._editor
text_cursor = editor.textCursor()
text_cursor.movePosition(
QTextCursor.MoveOperation.End,
QTextCursor.MoveMode.MoveAnchor
)
text_cursor.select(QTextCursor.SelectionType.LineUnderCursor)
text_cursor.removeSelectedText()
text_cursor.deletePreviousChar()
editor.setTextCursor(text_cursor)
def _move_cursor_to(self, line):
cursor = self._editor.textCursor()
block = self._editor.document().findBlockByNumber(line-1)
cursor.setPosition(block.position())
return cursor
def select_lines(self, start=0, end=-1, apply_selection=True):
"""
Selects entire lines between start and end line numbers.
This functions apply the selection and returns the text cursor that
contains the selection.
Optionally it is possible to prevent the selection from being applied
on the code editor widget by setting ``apply_selection`` to False.
:param start: Start line number (0 based)
:param end: End line number (0 based). Use -1 to select up to the
end of the document
:param apply_selection: True to apply the selection before returning
the QTextCursor.
:returns: A QTextCursor that holds the requested selection
"""
editor = self._editor
if end == -1:
end = self.line_count() - 1
if start < 0:
start = 0
text_cursor = self._move_cursor_to(start)
if end > start: # Going down
text_cursor.movePosition(
QTextCursor.MoveOperation.Down,
QTextCursor.MoveMode.KeepAnchor,
end - start
)
text_cursor.movePosition(
QTextCursor.MoveOperation.EndOfLine,
QTextCursor.MoveMode.KeepAnchor
)
elif end < start: # going up
# don't miss end of line !
text_cursor.movePosition(QTextCursor.MoveOperation.EndOfLine,
QTextCursor.MoveMode.MoveAnchor)
text_cursor.movePosition(
QTextCursor.MoveOperation.Up,
QTextCursor.MoveMode.KeepAnchor,
start - end
)
text_cursor.movePosition(
QTextCursor.MoveOperation.StartOfLine,
QTextCursor.MoveMode.KeepAnchor
)
else:
text_cursor.movePosition(
QTextCursor.MoveOperation.EndOfLine,
QTextCursor.MoveMode.KeepAnchor
)
if apply_selection:
editor.setTextCursor(text_cursor)
return text_cursor
def line_pos_from_number(self, line_number):
"""
Computes line position on Y-Axis (at the center of the line) from line
number.
:param line_number: The line number for which we want to know the
position in pixels.
:return: The center position of the line.
"""
editor = self._editor
block = editor.document().findBlockByNumber(line_number)
if block.isValid():
return int(editor.blockBoundingGeometry(block).translated(
editor.contentOffset()).top())
if line_number <= 0:
return 0
else:
return int(editor.blockBoundingGeometry(
block.previous()).translated(editor.contentOffset()).bottom())
def line_nbr_from_position(self, y_pos):
"""
Returns the line number from the y_pos.
:param y_pos: Y pos in the editor
:return: Line number (0 based), -1 if out of range
"""
editor = self._editor
height = editor.fontMetrics().height()
for top, line, block in editor.visible_blocks:
if top <= y_pos <= top + height:
return line
return -1
def mark_whole_doc_dirty(self):
"""
Marks the whole document as dirty to force a full refresh. **SLOW**
"""
text_cursor = self._editor.textCursor()
text_cursor.select(QTextCursor.SelectionType.Document)
self._editor.document().markContentsDirty(text_cursor.selectionStart(),
text_cursor.selectionEnd())
def insert_text(self, text, keep_position=True):
"""
Inserts text at the cursor position.
:param text: text to insert
:param keep_position: Flag that specifies if the cursor position must
be kept. Pass False for a regular insert (the cursor will be at
the end of the inserted text).
"""
text_cursor = self._editor.textCursor()
if keep_position:
s = text_cursor.selectionStart()
e = text_cursor.selectionEnd()
text_cursor.insertText(text)
if keep_position:
text_cursor.setPosition(s)
text_cursor.setPosition(e, QTextCursor.MoveMode.KeepAnchor)
self._editor.setTextCursor(text_cursor)
def search_text(self, text_cursor, search_txt, search_flags):
"""
Searches a text in a text document.
:param text_cursor: Current text cursor
:param search_txt: Text to search
:param search_flags: QTextDocument.FindFlags
:returns: the list of occurrences, the current occurrence index
:rtype: tuple([], int)
"""
def compare_cursors(cursor_a, cursor_b):
"""
Compares two QTextCursor.
:param cursor_a: cursor a
:param cursor_b: cursor b
:returns; True if both cursor are identical (same position, same
selection)
"""
return (cursor_b.selectionStart() >= cursor_a.selectionStart() and
cursor_b.selectionEnd() <= cursor_a.selectionEnd())
text_document = self._editor.document()
occurrences = []
index = -1
cursor = text_document.find(search_txt, 0, search_flags)
original_cursor = text_cursor
while not cursor.isNull():
if compare_cursors(cursor, original_cursor):
index = len(occurrences)
occurrences.append((cursor.selectionStart(),
cursor.selectionEnd()))
cursor.setPosition(cursor.position() + 1)
cursor = text_document.find(search_txt, cursor, search_flags)
return occurrences, index
def is_comment_or_string(self, cursor_or_block, formats=None):
"""
Checks if a block/cursor is a string or a comment.
:param cursor_or_block: QTextCursor or QTextBlock
:param formats: the list of color scheme formats to consider. By
default, it will consider the following keys: 'comment', 'string',
'docstring'.
"""
if formats is None:
formats = ["comment", "string", "docstring"]
layout = None
pos = 0
if isinstance(cursor_or_block, QTextBlock):
pos = len(cursor_or_block.text()) - 1
layout = cursor_or_block.layout()
elif isinstance(cursor_or_block, QTextCursor):
b = cursor_or_block.block()
pos = cursor_or_block.position() - b.position()
layout = b.layout()
if layout is not None:
additional_formats = layout.formats()
sh = self._editor.syntax_highlighter
if sh:
ref_formats = sh.color_scheme.formats
for r in additional_formats:
if r.start <= pos < (r.start + r.length):
for fmt_type in formats:
is_user_obj = (r.format.objectType() ==
r.format.UserObject)
if (ref_formats[fmt_type] == r.format and
is_user_obj):
return True
return False
| TextHelper |
python | openai__openai-python | src/openai/resources/evals/runs/runs.py | {
"start": 22911,
"end": 23611
} | class ____:
def __init__(self, runs: Runs) -> None:
self._runs = runs
self.create = to_streamed_response_wrapper(
runs.create,
)
self.retrieve = to_streamed_response_wrapper(
runs.retrieve,
)
self.list = to_streamed_response_wrapper(
runs.list,
)
self.delete = to_streamed_response_wrapper(
runs.delete,
)
self.cancel = to_streamed_response_wrapper(
runs.cancel,
)
@cached_property
def output_items(self) -> OutputItemsWithStreamingResponse:
return OutputItemsWithStreamingResponse(self._runs.output_items)
| RunsWithStreamingResponse |
python | sanic-org__sanic | sanic/asgi.py | {
"start": 4069,
"end": 9402
} | class ____:
sanic_app: Sanic
request: Request
transport: MockTransport
lifespan: Lifespan
ws: Optional[WebSocketConnection]
stage: Stage
response: Optional[BaseHTTPResponse]
@classmethod
async def create(
cls,
sanic_app: Sanic,
scope: ASGIScope,
receive: ASGIReceive,
send: ASGISend,
) -> ASGIApp:
instance = cls()
instance.ws = None
instance.sanic_app = sanic_app
instance.transport = MockTransport(scope, receive, send)
instance.transport.loop = sanic_app.loop
instance.stage = Stage.IDLE
instance.response = None
instance.sanic_app.state.is_started = True
setattr(instance.transport, "add_task", sanic_app.loop.create_task)
try:
headers = Header(
[
(
key.decode("ASCII"),
value.decode(errors="surrogateescape"),
)
for key, value in scope.get("headers", [])
]
)
except UnicodeDecodeError:
raise BadRequest(
"Header names can only contain US-ASCII characters"
)
if scope["type"] == "http":
version = scope["http_version"]
method = scope["method"]
elif scope["type"] == "websocket":
version = "1.1"
method = "GET"
instance.ws = instance.transport.create_websocket_connection(
send, receive
)
else:
raise ServerError("Received unknown ASGI scope")
url_bytes, query = scope["raw_path"], scope["query_string"]
if query:
# httpx ASGI client sends query string as part of raw_path
url_bytes = url_bytes.split(b"?", 1)[0]
# All servers send them separately
url_bytes = b"%b?%b" % (url_bytes, query)
request_class = sanic_app.request_class or Request # type: ignore
instance.request = request_class(
url_bytes,
headers,
version,
method,
instance.transport,
sanic_app,
)
request_class._current.set(instance.request)
instance.request.stream = instance # type: ignore
instance.request_body = True
instance.request.conn_info = ConnInfo(instance.transport)
await instance.sanic_app.dispatch(
"http.lifecycle.request",
inline=True,
context={"request": instance.request},
fail_not_found=False,
)
return instance
async def read(self) -> Optional[bytes]:
"""
Read and stream the body in chunks from an incoming ASGI message.
"""
if self.stage is Stage.IDLE:
self.stage = Stage.REQUEST
message = await self.transport.receive()
body = message.get("body", b"")
if not message.get("more_body", False):
self.request_body = False
if not body:
return None
return body
async def __aiter__(self):
while self.request_body:
data = await self.read()
if data:
yield data
def respond(self, response: BaseHTTPResponse):
if self.stage is not Stage.HANDLER:
self.stage = Stage.FAILED
raise RuntimeError("Response already started")
if self.response is not None:
self.response.stream = None
response.stream, self.response = self, response
return response
async def send(self, data, end_stream):
if self.stage is Stage.IDLE:
if not end_stream or data:
raise RuntimeError(
"There is no request to respond to, either the "
"response has already been sent or the "
"request has not been received yet."
)
return
if self.response and self.stage is Stage.HANDLER:
await self.transport.send(
{
"type": "http.response.start",
"status": self.response.status,
"headers": self.response.processed_headers,
}
)
response_body = getattr(self.response, "body", None)
if response_body:
data = response_body + data if data else response_body
self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
await self.transport.send(
{
"type": "http.response.body",
"body": data.encode() if hasattr(data, "encode") else data,
"more_body": not end_stream,
}
)
_asgi_single_callable = True # We conform to ASGI 3.0 single-callable
async def __call__(self) -> None:
"""
Handle the incoming request.
"""
try:
self.stage = Stage.HANDLER
await self.sanic_app.handle_request(self.request)
except Exception as e:
try:
await self.sanic_app.handle_exception(self.request, e)
except Exception as exc:
await self.sanic_app.handle_exception(self.request, exc, False)
| ASGIApp |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/tests/conftest.py | {
"start": 5690,
"end": 11217
} | class ____:
def __init__(self, credentials=None, project=None):
self.credentials = credentials
self.project = project
self._secrets = {}
def create_secret(self, request=None, parent=None, secret_id=None, **kwds):
response = MagicMock()
if request:
parent = request.parent
secret_id = request.secret_id
name = f"{parent}/secrets/{secret_id}"
response.name = name
self._secrets[name] = None
return response
def add_secret_version(self, request=None, parent=None, payload=None, **kwds):
response = MagicMock()
if request:
parent = request.parent
if parent not in self._secrets:
raise ApiCoreNotFound(f"{parent!r} does not exist.")
response.name = parent
return response
def access_secret_version(self, request=None, name=None, **kwds):
response = MagicMock()
payload = MagicMock()
payload.data = "secret_data".encode("utf-8")
response.payload = payload
return response
def delete_secret(self, request=None, name=None, **kwds):
return name
def destroy_secret_version(self, name, **kwds):
return name
@pytest.fixture
def mock_credentials(monkeypatch):
mock_credentials = MagicMock(name="MockGoogleCredentials")
mock_authenticated_credentials = MagicMock(token="my-token")
monkeypatch.setattr(
"prefect_gcp.credentials.Credentials", # noqa
mock_credentials,
)
mock_auth = MagicMock()
mock_auth.default.return_value = (mock_authenticated_credentials, "project")
monkeypatch.setattr(
"prefect_gcp.credentials.google.auth", # noqa
mock_auth,
)
monkeypatch.setattr(
"prefect_gcp.credentials.Credentials.from_service_account_info.universe_domain", # noqa
"fake_universe_domain",
)
return mock_credentials
@pytest.fixture
def job_service_client():
job_service_client_mock = MagicMock()
custom_run = MagicMock(name="mock_name")
job_service_client_mock.create_custom_job.return_value = custom_run
error = MagicMock(message="")
custom_run_final = MagicMock(
name="mock_name",
state=JobState.JOB_STATE_SUCCEEDED,
error=error,
display_name="mock_display_name",
)
job_service_client_mock.get_custom_job.return_value = custom_run_final
return job_service_client_mock
@pytest.fixture
def job_service_async_client():
job_service_client_async_mock = MagicMock()
custom_run = MagicMock(name="mock_name")
job_service_client_async_mock.create_custom_job = AsyncMock(return_value=custom_run)
error = MagicMock(message="")
custom_run_final = MagicMock(
name="mock_name",
state=JobState.JOB_STATE_SUCCEEDED,
error=error,
display_name="mock_display_name",
)
job_service_client_async_mock.get_custom_job = AsyncMock(
return_value=custom_run_final
)
job_service_client_async_mock.cancel_custom_job = AsyncMock()
return job_service_client_async_mock
@pytest.fixture()
def service_account_info(monkeypatch):
monkeypatch.setattr(
"google.auth.crypt._cryptography_rsa.serialization.load_pem_private_key",
lambda *args, **kwargs: args[0],
)
_service_account_info = {
"project_id": "my_project",
"token_uri": "my-token-uri",
"client_email": "my-client-email",
"private_key": "my-private-key",
}
return _service_account_info
@pytest.fixture()
def service_account_info_json(monkeypatch):
monkeypatch.setattr(
"google.auth.crypt._cryptography_rsa.serialization.load_pem_private_key",
lambda *args, **kwargs: args[0],
)
_service_account_info = json.dumps(
{
"project_id": "my_project",
"token_uri": "my-token-uri",
"client_email": "my-client-email",
"private_key": "my-private-key",
}
)
return _service_account_info
@pytest.fixture
def gcp_credentials(
google_auth: MagicMock,
mock_credentials: MagicMock,
job_service_client: MagicMock,
job_service_async_client: MagicMock,
):
gcp_credentials_mock = MagicMock(spec=GcpCredentials)
gcp_credentials_mock.service_account_info = None
gcp_credentials_mock.service_account_info_file = None
gcp_credentials_mock.project = "gcp_credentials_project"
gcp_credentials_mock.get_cloud_storage_client.return_value = CloudStorageClient()
gcp_credentials_mock.get_credentials_from_service_account.return_value = (
mock_credentials
)
gcp_credentials_mock._service_account_email = "my_service_account_email"
gcp_credentials_mock.job_service_client = job_service_client
gcp_credentials_mock.job_service_client.__enter__.return_value = job_service_client
gcp_credentials_mock.job_service_async_client = job_service_async_client
gcp_credentials_mock.job_service_client.__enter__.return_value = (
job_service_async_client
)
gcp_credentials_mock.get_bigquery_client.return_value = BigQueryClient()
gcp_credentials_mock.get_secret_manager_client.return_value = SecretManagerClient()
gcp_credentials_mock.get_job_service_client.return_value = (
gcp_credentials_mock.job_service_client
)
gcp_credentials_mock.get_job_service_async_client.return_value = (
gcp_credentials_mock.job_service_async_client
)
return gcp_credentials_mock
| SecretManagerClient |
python | has2k1__plotnine | plotnine/geoms/geom_quantile.py | {
"start": 77,
"end": 898
} | class ____(geom_path):
"""
Quantile lines from a quantile regression
{usage}
Parameters
----------
{common_parameters}
lineend : Literal["butt", "round", "projecting"], default="butt"
Line end style. This option is applied for solid linetypes.
linejoin : Literal["round", "miter", "bevel"], default="round"
Line join style. This option is applied for solid linetypes.
See Also
--------
plotnine.stat_quantile : The default `stat` for this `geom`.
"""
DEFAULT_AES = {
"alpha": 1,
"color": "#3366FF",
"linetype": "solid",
"size": 0.5,
}
DEFAULT_PARAMS = {
"stat": "quantile",
"position": "identity",
"na_rm": False,
"lineend": "butt",
"linejoin": "round",
}
| geom_quantile |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_pennsylvania_zip.py | {
"start": 767,
"end": 1783
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_pennsylvania_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_pennsylvania_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidPennsylvaniaZip |
python | google__pytype | pytype/tests/test_typing_methods2.py | {
"start": 124,
"end": 2452
} | class ____(test_base.BaseTest):
"""Tests for typing.py."""
def test_mapping(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Mapping
K = TypeVar("K")
V = TypeVar("V")
class MyDict(Mapping[K, V]): ...
def f() -> MyDict[str, int]: ...
""",
)
ty = self.Infer(
"""
import foo
m = foo.f()
a = m.copy()
b = "foo" in m
c = m["foo"]
d = m.get("foo", 3)
e = [x for x in m.items()]
f = [x for x in m.keys()]
g = [x for x in m.values()]
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Tuple, Union
import foo
m = ... # type: foo.MyDict[str, int]
a = ... # type: typing.Mapping[str, int]
b = ... # type: bool
c = ... # type: int
d = ... # type: int
e = ... # type: List[Tuple[str, int]]
f = ... # type: List[str]
g = ... # type: List[int]
""",
)
def test_supportsbytes(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import SupportsBytes
def f() -> SupportsBytes: ...
""",
)
self.Check(
"""
import foo
x = foo.f()
bytes(x)
""",
pythonpath=[d.path],
)
def test_assert_never(self):
self.Check("""
from typing import Union
from typing_extensions import assert_never
def int_or_str(arg: Union[int, str]) -> None:
if isinstance(arg, int):
pass
elif isinstance(arg, str):
pass
else:
assert_never("oops!")
""")
def test_assert_never_failure(self):
errors = self.CheckWithErrors("""
from typing import Union
from typing_extensions import assert_never
def int_or_str(arg: Union[int, str]) -> None:
if isinstance(arg, int):
pass
else:
assert_never("oops!") # wrong-arg-types[e]
""")
self.assertErrorSequences(
errors, {"e": ["Expected", "empty", "Actual", "str"]}
)
if __name__ == "__main__":
test_base.main()
| TypingMethodsTest |
python | celery__celery | t/integration/tasks.py | {
"start": 8879,
"end": 10054
} | class ____(Exception):
"""Exception that doesn't survive a pickling roundtrip (dump + load)."""
def __init__(self, foo, bar=None):
if bar is None:
# We define bar with a default value in the signature so that
# it's easier to add a break point here to find out when the
# exception is being unpickled.
raise TypeError("bar must be provided")
super().__init__(foo)
self.bar = bar
@shared_task
def fail(*args):
"""Task that simply raises ExpectedException."""
args = ("Task expected to fail",) + args
raise ExpectedException(*args)
@shared_task()
def fail_unpickleable(foo, bar):
"""Task that raises an unpickleable exception."""
raise UnpickleableException(foo, bar)
@shared_task(bind=True)
def fail_replaced(self, *args):
"""Replace this task with one which raises ExpectedException."""
raise self.replace(fail.si(*args))
@shared_task(bind=True)
def return_priority(self, *_args):
return "Priority: %s" % self.request.delivery_info['priority']
@shared_task(bind=True)
def return_properties(self):
return self.request.properties
| UnpickleableException |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0011_alter_historicaluser_first_name.py | {
"start": 148,
"end": 518
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("core", "0010_add_time_fields"),
]
operations = [
migrations.AlterField(
model_name="historicaluser",
name="first_name",
field=models.CharField(blank=True, max_length=150, verbose_name="first name"),
),
]
| Migration |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 55946,
"end": 59575
} | class ____(GoogleCloudBaseOperator):
"""
Gets the details of a specific Memcached instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedGetInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"instance_id": self.instance,
"location_id": self.location,
}
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return cloud_memcache.Instance.to_dict(result)
| CloudMemorystoreMemcachedGetInstanceOperator |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_tasks.py | {
"start": 7787,
"end": 8623
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_resume_queue(self, mock_hook):
mock_hook.return_value.resume_queue.return_value = TEST_QUEUE
operator = CloudTasksQueueResumeOperator(location=LOCATION, queue_name=QUEUE_ID, task_id="id")
result = operator.execute(context=mock.MagicMock())
assert result == {"name": FULL_QUEUE_PATH, "state": 0}
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.resume_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudTasksQueueResume |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/interfaces.py | {
"start": 40198,
"end": 52993
} | class ____:
"""This represents a batch of data.
This is usually not the data itself but a hook to the data on an external datastore such as
a spark or a sql database. An exception exists for pandas or any in-memory datastore.
"""
def __init__( # noqa: PLR0913 # FIXME CoP
self,
datasource: Datasource,
data_asset: DataAsset,
batch_request: BatchRequest,
data: BatchData,
batch_markers: BatchMarkers,
batch_spec: BatchSpec,
batch_definition: LegacyBatchDefinition,
metadata: Dict[str, Any] | None = None,
):
# Immutable attributes
self._datasource = datasource
self._data_asset = data_asset
self._batch_request = batch_request
self._data = data
# Immutable legacy attributes
# TODO: These legacy fields are required but we should figure out how to delete them
self._batch_markers = batch_markers
self._batch_spec = batch_spec
self._batch_definition = batch_definition
# Mutable Attribute
# metadata is any arbitrary data one wants to associate with a batch. GX will add arbitrary metadata # noqa: E501 # FIXME CoP
# to a batch so developers may want to namespace any custom metadata they add.
self.metadata = metadata or {}
# Immutable generated attribute
self._id = self._create_id()
def _create_id(self) -> str:
options_list = []
for key, value in self.batch_request.options.items():
if key not in ("path", "dataframe"):
options_list.append(f"{key}_{value}")
return "-".join([self.datasource.name, self.data_asset.name, *options_list])
@property
def datasource(self) -> Datasource:
return self._datasource
@property
def data_asset(self) -> DataAsset:
return self._data_asset
@property
def batch_request(self) -> BatchRequest:
return self._batch_request
@property
def data(self) -> BatchData:
return self._data
@property
def batch_markers(self) -> BatchMarkers:
return self._batch_markers
@property
def batch_spec(self) -> BatchSpec:
return self._batch_spec
@property
def batch_definition(self) -> LegacyBatchDefinition:
return self._batch_definition
@property
def id(self) -> str:
return self._id
def _get_metrics_calculator(self) -> MetricsCalculator:
self.data.execution_engine.batch_manager.load_batch_list(batch_list=[self])
return MetricsCalculator(
execution_engine=self.data.execution_engine,
show_progress_bars=True,
)
@public_api
@validate_arguments
def columns(self) -> List[str]:
"""Return column names of this Batch.
Returns:
list of column names.
"""
return self._get_metrics_calculator().columns()
@public_api
@validate_arguments
def head(
self,
n_rows: StrictInt = 5,
fetch_all: StrictBool = False,
) -> HeadData:
"""Return the first n rows of this Batch.
This method returns the first n rows for the Batch based on position.
For negative values of n_rows, this method returns all rows except the last n rows.
If n_rows is larger than the number of rows, this method returns all rows.
Args:
n_rows: The number of rows to return from the Batch.
fetch_all: If True, ignore n_rows and return the entire Batch.
Returns:
HeadData
"""
metrics_calculator = self._get_metrics_calculator()
table_head_df: pd.DataFrame = metrics_calculator.head(
n_rows=n_rows,
domain_kwargs={"batch_id": self.id},
fetch_all=fetch_all,
)
return HeadData(data=table_head_df.reset_index(drop=True, inplace=False))
@overload
def validate(
self,
expect: Expectation,
*,
result_format: ResultFormatUnion = DEFAULT_RESULT_FORMAT,
expectation_parameters: Optional[SuiteParameterDict] = None,
) -> ExpectationValidationResult: ...
@overload
def validate(
self,
expect: ExpectationSuite,
*,
result_format: ResultFormatUnion = DEFAULT_RESULT_FORMAT,
expectation_parameters: Optional[SuiteParameterDict] = None,
) -> ExpectationSuiteValidationResult: ...
@public_api
def validate(
self,
expect: Expectation | ExpectationSuite,
*,
result_format: ResultFormatUnion = DEFAULT_RESULT_FORMAT,
expectation_parameters: Optional[SuiteParameterDict] = None,
) -> ExpectationValidationResult | ExpectationSuiteValidationResult:
"""
Validate the Batch using the provided Expectation or Expectation Suite.
Args:
expect: The Expectation or Expectation Suite to validate.
result_format: The format to return the validation results in.
expectation_parameters: A dictionary of parameters values for any
expectations using parameterized values (the $PARAMETER syntax).
The keys are the parameter names and the values are the values
to be used for this validation run.
Returns:
An ExpectationValidationResult or ExpectationSuiteValidationResult object.
Raises:
ValueError: If the expect argument is not an Expectation or an ExpectationSuite.
"""
from great_expectations.core import ExpectationSuite
from great_expectations.expectations.expectation import Expectation
if isinstance(expect, Expectation):
return self._validate_expectation(
expect, result_format=result_format, expectation_parameters=expectation_parameters
)
elif isinstance(expect, ExpectationSuite):
return self._validate_expectation_suite(
expect, result_format=result_format, expectation_parameters=expectation_parameters
)
else:
# If we are type checking, we should never fall through to this case. However, exploratory # noqa: E501 # FIXME CoP
# workflows are not being type checked.
raise ValueError( # noqa: TRY003, TRY004 # FIXME CoP
f"Trying to validate something that isn't an Expectation or an ExpectationSuite: {expect}" # noqa: E501 # FIXME CoP
)
def _validate_expectation(
self,
expect: Expectation,
result_format: ResultFormatUnion,
expectation_parameters: Optional[SuiteParameterDict] = None,
) -> ExpectationValidationResult:
return self._create_validator(
result_format=result_format,
).validate_expectation(expectation=expect, expectation_parameters=expectation_parameters)
def _validate_expectation_suite(
self,
expect: ExpectationSuite,
result_format: ResultFormatUnion,
expectation_parameters: Optional[SuiteParameterDict] = None,
) -> ExpectationSuiteValidationResult:
return self._create_validator(
result_format=result_format,
).validate_expectation_suite(
expectation_suite=expect, expectation_parameters=expectation_parameters
)
def _create_validator(self, *, result_format: ResultFormatUnion) -> V1Validator:
from great_expectations.validator.v1_validator import Validator as V1Validator
context = self.datasource.data_context
if context is None:
raise ValueError( # noqa: TRY003 # FIXME CoP
"We can't validate batches that are attached to datasources without a data context"
)
# note: batch definition is created but NOT added to the asset, as it should not persist
batch_definition = BatchDefinition(
name="-".join([self.datasource.name, self.data_asset.name, str(uuid.uuid4())]),
partitioner=self.batch_request.partitioner,
)
batch_definition.set_data_asset(self.data_asset)
return V1Validator(
batch_definition=batch_definition,
batch_parameters=self.batch_request.options,
result_format=result_format,
)
@overload
def compute_metrics(
self, metrics: Metric[_MetricResultT]
) -> _MetricResultT | MetricErrorResult: ...
@overload
def compute_metrics(self, metrics: list[Metric]) -> list[MetricResult]: ...
def compute_metrics(
self, metrics: Metric[_MetricResultT] | list[Metric]
) -> _MetricResultT | MetricErrorResult | list[MetricResult]:
"""Compute one or more metrics on this Batch.
Args:
metrics: A single Metric or list of Metrics to compute.
Each Metric must be an instance of a concrete Metric subclass.
Returns:
If a single Metric is provided, returns a single MetricResult.
If a list of Metrics is provided, returns a list of MetricResults,
in the same order as the input metrics were received.
For metrics without a defined MetricResult generic type,
the base MetricResult class will be returned.
For metrics that raise an exception, a MetricErrorResult will be returned.
Examples:
>>> batch.compute_metrics(BatchRowCount())
BatchRowCountResult(id=..., value=1000)
>>> batch.compute_metrics([
... BatchRowCount(),
... ColumnMax(column="age")
... ])
[BatchRowCountResult(id=..., value=1000), ColumnMaxResult(id=..., value=85)]
Notes:
Until this mypy bug is resolved, lists of Metrics are being incorrectly
inferred by the static type checker as list[Domain]. You can work around
this by adding an explicit annotation (e.g. metrics: list[Metric] = ...)
https://github.com/python/mypy/issues/18712
"""
is_single_metric = False
if isinstance(metrics, Metric):
metrics = [metrics]
is_single_metric = True
metrics_calculator = self._get_metrics_calculator()
metrics_calculator_results, metrics_calculator_errors = metrics_calculator.compute_metrics(
metric_configurations=[metric.config(batch_id=self.id) for metric in metrics],
runtime_configuration=None,
)
results = []
for metric in metrics:
metric_id_for_batch = metric.metric_id_for_batch(batch_id=self.id)
if metric_id_for_batch in metrics_calculator_results:
MetricType = MetaMetric.get_registered_metric_class_from_metric_name(metric.name)
MetricResultType = MetricType.get_metric_result_type()
value = self._parse_metric_value(
metric_name=metric.name,
metric_calculator_result=metrics_calculator_results[metric_id_for_batch],
)
results.append(MetricResultType(id=metric_id_for_batch, value=value))
elif metric_id_for_batch in metrics_calculator_errors:
metrics_calculator_error = metrics_calculator_errors[metric_id_for_batch]
value = MetricErrorResultValue(
exception_message=metrics_calculator_error["exception_info"].exception_message,
exception_traceback=metrics_calculator_error[
"exception_info"
].exception_traceback,
)
results.append(MetricErrorResult(id=metric_id_for_batch, value=value))
else:
results.append(
MetricErrorResult(
id=metric_id_for_batch,
value=MetricErrorResultValue(
exception_message=f"Metric {metric.name} not found in results: "
f"{list(metrics_calculator_results.keys())} or errors: "
f"{list(metrics_calculator_errors.keys())}",
),
)
)
if is_single_metric:
return results[0]
return results
def _parse_metric_value(
self, metric_name: str, metric_calculator_result: MetricValue
) -> MetricValue:
if metric_name.endswith(MetricNameSuffix.CONDITION) and isinstance(
metric_calculator_result, tuple
):
value = metric_calculator_result[0]
else:
value = metric_calculator_result
return value
| Batch |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_storage_version_migration_spec.py | {
"start": 383,
"end": 5103
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'continue_token': 'str',
'resource': 'V1alpha1GroupVersionResource'
}
attribute_map = {
'continue_token': 'continueToken',
'resource': 'resource'
}
def __init__(self, continue_token=None, resource=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1StorageVersionMigrationSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._continue_token = None
self._resource = None
self.discriminator = None
if continue_token is not None:
self.continue_token = continue_token
self.resource = resource
@property
def continue_token(self):
"""Gets the continue_token of this V1alpha1StorageVersionMigrationSpec. # noqa: E501
The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration. # noqa: E501
:return: The continue_token of this V1alpha1StorageVersionMigrationSpec. # noqa: E501
:rtype: str
"""
return self._continue_token
@continue_token.setter
def continue_token(self, continue_token):
"""Sets the continue_token of this V1alpha1StorageVersionMigrationSpec.
The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration. # noqa: E501
:param continue_token: The continue_token of this V1alpha1StorageVersionMigrationSpec. # noqa: E501
:type: str
"""
self._continue_token = continue_token
@property
def resource(self):
"""Gets the resource of this V1alpha1StorageVersionMigrationSpec. # noqa: E501
:return: The resource of this V1alpha1StorageVersionMigrationSpec. # noqa: E501
:rtype: V1alpha1GroupVersionResource
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1alpha1StorageVersionMigrationSpec.
:param resource: The resource of this V1alpha1StorageVersionMigrationSpec. # noqa: E501
:type: V1alpha1GroupVersionResource
"""
if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
self._resource = resource
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1StorageVersionMigrationSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1StorageVersionMigrationSpec):
return True
return self.to_dict() != other.to_dict()
| V1alpha1StorageVersionMigrationSpec |
python | PrefectHQ__prefect | tests/input/test_actions.py | {
"start": 3707,
"end": 4377
} | class ____:
async def test_implicit_flow_run(self, flow_run_context):
await create_flow_run_input(key="key", value="value")
await delete_flow_run_input(key="key")
assert (
await read_flow_run_input(
key="key", flow_run_id=flow_run_context.flow_run.id
)
is None
)
async def test_explicit_flow_run(self, flow_run):
await create_flow_run_input(key="key", value="value", flow_run_id=flow_run.id)
await delete_flow_run_input(key="key", flow_run_id=flow_run.id)
assert await read_flow_run_input(key="key", flow_run_id=flow_run.id) is None
| TestDeleteFlowRunInput |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/tiktok/tests.py | {
"start": 240,
"end": 875
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = TikTokProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"data": {
"user": {
"open_id": "44322889",
"username": "username123",
"display_name": "Nice Display Name",
"avatar_url": "https://example.com/avatar.jpg",
"profile_deep_link": "https://example.com/profile"
}
}
}
""",
)
def get_expected_to_str(self):
return "username123"
| TikTokTests |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 3983,
"end": 4107
} | class ____(TimedeltaUnaryOp):
key = operator.neg
@infer_global(operator.add)
@infer_global(operator.iadd)
| TimedeltaUnaryNeg |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 28545,
"end": 29954
} | class ____(BaseUserFunctionVariable):
_nonvar_fields = {
"allowed_types",
*BaseUserFunctionVariable._nonvar_fields,
}
def __init__(
self,
allowed_types: tuple[type, ...],
map_fn: VariableTracker,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.allowed_types = allowed_types
self.map_fn = map_fn
def python_type(self) -> type:
return FunctionType
def _matches_allowed_type(self, node: VariableTracker) -> bool:
try:
node_type = node.python_type()
except NotImplementedError:
return False
return any(issubclass(node_type, allowed) for allowed in self.allowed_types)
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if not args:
return self.map_fn.call_function(tx, args, kwargs)
leaf = args[0]
if self._matches_allowed_type(leaf):
return self.map_fn.call_function(tx, args, kwargs)
if len(args) != 1 or kwargs:
# Defer to the original map function so we fall back to normal
# tracing instead of triggering a graph break.
return self.map_fn.call_function(tx, args, kwargs)
return leaf
| TreeMapOnlyFunctionVariable |
python | doocs__leetcode | solution/2300-2399/2357.Make Array Zero by Subtracting Equal Amounts/Solution.py | {
"start": 0,
"end": 116
} | class ____:
def minimumOperations(self, nums: List[int]) -> int:
return len({x for x in nums if x})
| Solution |
python | kamyu104__LeetCode-Solutions | Python/sum-of-subarray-ranges.py | {
"start": 29,
"end": 815
} | class ____(object):
def subArrayRanges(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
stk = []
for i in xrange(len(nums)+1):
x = nums[i] if i < len(nums) else float("inf")
while stk and nums[stk[-1]] <= x:
j = stk.pop()
k = stk[-1] if stk else -1
result += nums[j]*(j-k)*(i-j)
stk.append(i)
stk = []
for i in xrange(len(nums)+1):
x = nums[i] if i < len(nums) else float("-inf")
while stk and nums[stk[-1]] >= x:
j = stk.pop()
k = stk[-1] if stk else -1
result -= nums[j]*(j-k)*(i-j)
stk.append(i)
return result
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/dataplex.py | {
"start": 1698,
"end": 4930
} | class ____(BaseSensorOperator):
"""
Check the status of the Dataplex task.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. Task identifier.
:param api_version: The version of the api that will be requested for example 'v3'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ["dataplex_task_id"]
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.dataplex_task_id = dataplex_task_id
self.api_version = api_version
self.retry = retry
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context) -> bool:
self.log.info("Waiting for task %s to be %s", self.dataplex_task_id, TaskState.ACTIVE)
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
task = hook.get_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
dataplex_task_id=self.dataplex_task_id,
retry=self.retry,
metadata=self.metadata,
)
task_status = task.state
if task_status == TaskState.DELETING:
message = f"Task is going to be deleted {self.dataplex_task_id}"
raise AirflowException(message)
self.log.info("Current status of the Dataplex task %s => %s", self.dataplex_task_id, task_status)
return task_status == TaskState.ACTIVE
| DataplexTaskStateSensor |
python | huggingface__transformers | src/transformers/models/persimmon/modeling_persimmon.py | {
"start": 7352,
"end": 8685
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
self.act = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
hidden_states = self.dense_h_to_4h(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dense_4h_to_h(hidden_states)
return hidden_states
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| PersimmonMLP |
python | huggingface__transformers | tests/models/clipseg/test_modeling_clipseg.py | {
"start": 15171,
"end": 20226
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {}
test_resize_embeddings = False
test_attention_outputs = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
# CLIPSegForImageSegmentation requires special treatment
if return_labels:
if model_class.__name__ == "CLIPSegForImageSegmentation":
batch_size, _, height, width = inputs_dict["pixel_values"].shape
inputs_dict["labels"] = torch.zeros(
[batch_size, height, width], device=torch_device, dtype=torch.float
)
return inputs_dict
def setUp(self):
self.model_tester = CLIPSegModelTester(self)
common_properties = ["projection_dim", "logit_scale_init_value"]
self.config_tester = ConfigTester(
self, config_class=CLIPSegConfig, has_text_modality=False, common_properties=common_properties
)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_config(self):
self.config_tester.run_common_tests()
def test_model_for_image_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="CLIPSegModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save CLIPSegConfig and check if we can load CLIPSegVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = CLIPSegVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save CLIPSegConfig and check if we can load CLIPSegTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = CLIPSegTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="Training test is skipped as the model was not trained")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class.__name__ in MODEL_MAPPING_NAMES.values():
continue
print("Model class:", model_class)
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
for k, v in inputs.items():
print(k, v.shape)
loss = model(**inputs).loss
loss.backward()
@slow
def test_model_from_pretrained(self):
model_name = "CIDAS/clipseg-rd64-refined"
model = CLIPSegModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
return image
@require_vision
@require_torch
| CLIPSegModelTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/document_summary/base.py | {
"start": 1507,
"end": 1648
} | class ____(str, Enum):
EMBEDDING = "embedding"
LLM = "llm"
_RetrieverMode = DocumentSummaryRetrieverMode
| DocumentSummaryRetrieverMode |
python | gevent__gevent | src/gevent/tests/test__selectors.py | {
"start": 3845,
"end": 5763
} | class ____(greentest.TestCase):
"""
Tests for the crashes and unexpected exceptions
that happen when we try to use or create (depending on
loop implementation) a IO watcher for a closed/invalid file descriptor.
See https://github.com/gevent/gevent/issues/2100
See test__select.py
"""
def test_closing_object_while_selecting(self):
sock = socket.socket()
self.addCleanup(sock.close)
gevent.spawn(sock.close)
sel = selectors.GeventSelector()
sel.register(sock, selectors.EVENT_READ)
# This call needs to be blocking so we get all the way
# to having an open, started IO watcher when the
# socket gets closed.
with sel:
sel.select(timeout=timing.SMALLEST_RELIABLE_DELAY)
def _close_invalid_sock(self, sock):
# Because we closed the FD already (which raises EBADF when done again), but we
# still need to take care of the gevent-resources
try:
sock.close()
except OSError:
pass
def test_closing_fd_while_selecting(self):
# using regular os.close will
# crash under libuv
from gevent import os
sock = socket.socket()
self.addCleanup(self._close_invalid_sock, sock)
gevent.spawn(os.close, sock.fileno())
sel = selectors.GeventSelector()
sel.register(sock, selectors.EVENT_READ)
with sel:
sel.select(timeout=timing.SMALLEST_RELIABLE_DELAY)
def test_closing_fd_before_selecting(self):
import os
sock = socket.socket()
self.addCleanup(self._close_invalid_sock, sock)
os.close(sock.fileno())
sel = selectors.GeventSelector()
with self.assertRaisesRegex(ValueError, 'Invalid file'):
sel.register(sock, selectors.EVENT_READ)
if __name__ == '__main__':
greentest.main()
| TestPossibleCrashes |
python | realpython__materials | django-diary/source_code_final/entries/views.py | {
"start": 337,
"end": 407
} | class ____(LoginRequiredMixin):
login_url = "admin:login"
| LockedView |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 32388,
"end": 32752
} | class ____(ExtractSuperClasses):
"""Visitor for extracting all superclasses (i.e., the class hierarchy).
This returns a mapping by name, e.g. {
"bool": ["int"],
"int": ["object"],
...
}.
"""
def _Key(self, node):
if isinstance(node, (pytd.GenericType, pytd.GENERIC_BASE_TYPE, pytd.Class)):
return node.name
| ExtractSuperClassesByName |
python | arrow-py__arrow | arrow/parser.py | {
"start": 31734,
"end": 33345
} | class ____:
"""
Parser for timezone information.
"""
_TZINFO_RE: ClassVar[Pattern[str]] = re.compile(
r"^(?:\(UTC)*([\+\-])?(\d{2})(?:\:?(\d{2}))?"
)
@classmethod
def parse(cls, tzinfo_string: str) -> dt_tzinfo:
"""
Parse a timezone string and return a datetime timezone object.
:param tzinfo_string: The timezone string to parse.
:type tzinfo_string: str
:returns: The parsed datetime timezone object.
:rtype: datetime.timezone
:raises ParserError: If the timezone string cannot be parsed.
"""
tzinfo: Optional[dt_tzinfo] = None
if tzinfo_string == "local":
tzinfo = datetime.now().astimezone().tzinfo
elif tzinfo_string in ["utc", "UTC", "Z"]:
tzinfo = timezone.utc
else:
iso_match = cls._TZINFO_RE.match(tzinfo_string)
if iso_match:
sign: Optional[str]
hours: str
minutes: Union[str, int, None]
sign, hours, minutes = iso_match.groups()
seconds = int(hours) * 3600 + int(minutes or 0) * 60
if sign == "-":
seconds *= -1
tzinfo = timezone(timedelta(seconds=seconds))
else:
try:
tzinfo = ZoneInfo(tzinfo_string)
except ZoneInfoNotFoundError:
tzinfo = None
if tzinfo is None:
raise ParserError(f"Could not parse timezone expression {tzinfo_string!r}.")
return tzinfo
| TzinfoParser |
python | apache__airflow | airflow-core/tests/unit/dag_processing/bundles/test_base.py | {
"start": 4102,
"end": 4724
} | class ____:
def __init__(self, num, **kwargs):
super().__init__(**kwargs)
self.num = num
self.stop = None
self.did_lock = None
self.locker: BundleVersionLock
def lock_the_file(self):
self.locker = BundleVersionLock(
bundle_name="abc",
bundle_version="this",
)
with self.locker:
self.did_lock = True
idx = 0
while not self.stop:
idx += 1
time.sleep(0.2)
log.info("sleeping: idx=%s num=%s", idx, self.num)
log.info("exit")
| LockTestHelper |
python | joke2k__faker | tests/test_generator.py | {
"start": 159,
"end": 487
} | class ____:
def foo_formatter(self):
return "foobar"
def foo_formatter_with_arguments(self, param="", append=""):
return "baz" + str(param) + str(append)
@pytest.fixture(autouse=True)
def generator():
generator = Generator()
generator.add_provider(FooProvider())
return generator
| FooProvider |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/experiment_service.py | {
"start": 12792,
"end": 15654
} | class ____(GoogleCloudBaseOperator):
"""
Use the Vertex AI SDK to update state of the experiment run.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_run_name: Required. The specific run name or ID for this experiment.
:param new_state: Required. The specific state of experiment run.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"location",
"project_id",
"impersonation_chain",
"experiment_name",
"experiment_run_name",
"new_state",
)
def __init__(
self,
*,
project_id: str,
location: str,
experiment_name: str,
experiment_run_name: str,
new_state: int,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.experiment_name = experiment_name
self.experiment_run_name = experiment_run_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.new_state = new_state
def execute(self, context: Context) -> None:
self.hook = ExperimentRunHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.hook.update_experiment_run_state(
project_id=self.project_id,
experiment_name=self.experiment_name,
experiment_run_name=self.experiment_run_name,
new_state=self.new_state,
location=self.location,
)
self.log.info("New state of the %s is: %s", self.experiment_run_name, self.new_state)
except exceptions.NotFound:
raise AirflowException("Experiment or experiment run not found")
| UpdateExperimentRunStateOperator |
python | jazzband__django-simple-history | simple_history/tests/tests/test_utils.py | {
"start": 11250,
"end": 14029
} | class ____(TransactionTestCase):
def setUp(self):
self.data = [
Poll(id=1, question="Question 1", pub_date=timezone.now()),
Poll(id=2, question="Question 2", pub_date=timezone.now()),
Poll(id=3, question="Question 3", pub_date=timezone.now()),
Poll(id=4, question="Question 4", pub_date=timezone.now()),
Poll(id=5, question="Question 5", pub_date=timezone.now()),
]
@patch(
"simple_history.manager.HistoryManager.bulk_history_create",
Mock(side_effect=Exception),
)
def test_transaction_rolls_back_if_bulk_history_create_fails(self):
with self.assertRaises(Exception):
bulk_create_with_history(self.data, Poll)
self.assertEqual(Poll.objects.count(), 0)
self.assertEqual(Poll.history.count(), 0)
def test_bulk_create_history_on_objects_that_already_exist(self):
Poll.objects.bulk_create(self.data)
with self.assertRaises(IntegrityError):
bulk_create_with_history(self.data, Poll)
self.assertEqual(Poll.objects.count(), 5)
self.assertEqual(Poll.history.count(), 0)
def test_bulk_create_history_rolls_back_when_last_exists(self):
Poll.objects.create(id=5, question="Question 5", pub_date=timezone.now())
self.assertEqual(Poll.objects.count(), 1)
self.assertEqual(Poll.history.count(), 1)
with self.assertRaises(IntegrityError):
bulk_create_with_history(self.data, Poll, batch_size=1)
self.assertEqual(Poll.objects.count(), 1)
self.assertEqual(Poll.history.count(), 1)
def test_bulk_create_fails_with_wrong_model(self):
with self.assertRaises(AttributeError):
bulk_create_with_history(self.data, Document)
self.assertEqual(Poll.objects.count(), 0)
self.assertEqual(Poll.history.count(), 0)
@patch("simple_history.utils.get_history_manager_for_model")
def test_bulk_create_no_ids_return(self, hist_manager_mock):
objects = [Place(id=1, name="Place 1")]
model = Mock(
_default_manager=Mock(
bulk_create=Mock(return_value=[Place(name="Place 1")]),
filter=Mock(return_value=Mock(order_by=Mock(return_value=objects))),
),
_meta=Mock(get_fields=Mock(return_value=[])),
)
result = bulk_create_with_history(objects, model)
self.assertEqual(result, objects)
hist_manager_mock().bulk_history_create.assert_called_with(
objects,
batch_size=None,
default_user=None,
default_change_reason=None,
default_date=None,
custom_historical_attrs=None,
)
| BulkCreateWithHistoryTransactionTestCase |
python | neetcode-gh__leetcode | python/0746-min-cost-climbing-stairs.py | {
"start": 0,
"end": 215
} | class ____:
def minCostClimbingStairs(self, cost: List[int]) -> int:
for i in range(len(cost) - 3, -1, -1):
cost[i] += min(cost[i + 1], cost[i + 2])
return min(cost[0], cost[1])
| Solution |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 26194,
"end": 26598
} | class ____(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
| Sky2Pix_HammerAitoff |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/op_definition.py | {
"start": 2378,
"end": 24901
} | class ____(NodeDefinition, IHasInternalInit):
"""Defines an op, the functional unit of user-defined computation.
End users should prefer the :func:`@op <op>` decorator. OpDefinition is generally intended to be
used by framework authors or for programatically generated ops.
Args:
name (str): Name of the op. Must be unique within any :py:class:`GraphDefinition` or
:py:class:`JobDefinition` that contains the op.
input_defs (List[InputDefinition]): Inputs of the op.
compute_fn (Callable): The core of the op, the function that performs the actual
computation. The signature of this function is determined by ``input_defs``, and
optionally, an injected first argument, ``context``, a collection of information
provided by the system.
This function will be coerced into a generator or an async generator, which must yield
one :py:class:`Output` for each of the op's ``output_defs``, and additionally may
yield other types of Dagster events, including :py:class:`AssetMaterialization` and
:py:class:`ExpectationResult`.
output_defs (List[OutputDefinition]): Outputs of the op.
config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check
that the config provided for the op matches this schema and will fail if it does not. If
not set, Dagster will accept any config provided for the op.
description (Optional[str]): Human-readable description of the op.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may
expect and require certain metadata to be attached to a op. Users should generally
not set metadata directly. Values that are not strings will be json encoded and must meet
the criteria that `json.loads(json.dumps(value)) == value`.
required_resource_keys (Optional[Set[str]]): Set of resources handles required by this op.
code_version (Optional[str]): Version of the code encapsulated by the op. If set,
this is used as a default code version for all outputs.
retry_policy (Optional[RetryPolicy]): The retry policy for this op.
pool (Optional[str]): A string that identifies the pool that governs this op's execution.
Examples:
.. code-block:: python
def _add_one(_context, inputs):
yield Output(inputs["num"] + 1)
OpDefinition(
name="add_one",
ins={"num": In(int)},
outs={"result": Out(int)},
compute_fn=_add_one,
)
"""
_compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"]
_config_schema: IDefinitionConfigSchema
_required_resource_keys: AbstractSet[str]
_version: Optional[str]
_retry_policy: Optional[RetryPolicy]
_pool: Optional[str]
def __init__(
self,
compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],
name: str,
ins: Optional[Mapping[str, In]] = None,
outs: Optional[Mapping[str, Out]] = None,
description: Optional[str] = None,
config_schema: Optional[Union[UserConfigSchema, IDefinitionConfigSchema]] = None,
required_resource_keys: Optional[AbstractSet[str]] = None,
tags: Optional[Mapping[str, Any]] = None,
version: Optional[str] = None,
retry_policy: Optional[RetryPolicy] = None,
code_version: Optional[str] = None,
pool: Optional[str] = None,
):
from dagster._core.definitions.decorators.op_decorator import (
DecoratedOpFunction,
resolve_checked_op_fn_inputs,
)
ins = check.opt_mapping_param(ins, "ins")
input_defs = [
inp.to_definition(name) for name, inp in sorted(ins.items(), key=lambda inp: inp[0])
] # sort so that input definition order is deterministic
if isinstance(compute_fn, DecoratedOpFunction):
resolved_input_defs: Sequence[InputDefinition] = resolve_checked_op_fn_inputs(
decorator_name="@op",
fn_name=name,
compute_fn=cast("DecoratedOpFunction", compute_fn),
explicit_input_defs=input_defs,
exclude_nothing=True,
)
self._compute_fn = compute_fn
_validate_context_type_hint(self._compute_fn.decorated_fn)
else:
resolved_input_defs = input_defs
self._compute_fn = check.callable_param(compute_fn, "compute_fn")
_validate_context_type_hint(self._compute_fn)
code_version = normalize_renamed_param(
code_version,
"code_version",
version,
"version",
)
self._version = code_version
check.opt_mapping_param(outs, "outs")
output_defs = _resolve_output_defs_from_outs(
compute_fn=compute_fn, outs=outs, default_code_version=code_version
)
self._config_schema = convert_user_facing_definition_config_schema(config_schema)
self._required_resource_keys = frozenset(
check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)
)
self._retry_policy = check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy)
self._pool = pool
pool = _validate_pool(pool, tags)
positional_inputs = (
self._compute_fn.positional_inputs()
if isinstance(self._compute_fn, DecoratedOpFunction)
else None
)
super().__init__(
name=name,
input_defs=check.sequence_param(resolved_input_defs, "input_defs", InputDefinition),
output_defs=check.sequence_param(output_defs, "output_defs", OutputDefinition),
description=description,
tags=(check.opt_mapping_param(tags, "tags", key_type=str)),
positional_inputs=positional_inputs,
)
def dagster_internal_init(
*,
compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],
name: str,
ins: Optional[Mapping[str, In]],
outs: Optional[Mapping[str, Out]],
description: Optional[str],
config_schema: Optional[Union[UserConfigSchema, IDefinitionConfigSchema]],
required_resource_keys: Optional[AbstractSet[str]],
tags: Optional[Mapping[str, Any]],
version: Optional[str],
retry_policy: Optional[RetryPolicy],
code_version: Optional[str],
pool: Optional[str],
) -> "OpDefinition":
return OpDefinition(
compute_fn=compute_fn,
name=name,
ins=ins,
outs=outs,
description=description,
config_schema=config_schema,
required_resource_keys=required_resource_keys,
tags=tags,
version=version,
retry_policy=retry_policy,
code_version=code_version,
pool=pool,
)
@property
def node_type_str(self) -> str:
return "op"
@property
def is_graph_job_op_node(self) -> bool:
return True
@public
@property
def name(self) -> str:
"""str: The name of this op."""
return super().name
@public
@property
def ins(self) -> Mapping[str, In]:
"""Mapping[str, In]: A mapping from input name to the In object that represents that input."""
return {input_def.name: In.from_definition(input_def) for input_def in self.input_defs}
@public
@property
def outs(self) -> Mapping[str, Out]:
"""Mapping[str, Out]: A mapping from output name to the Out object that represents that output."""
return {output_def.name: Out.from_definition(output_def) for output_def in self.output_defs}
@property
def compute_fn(self) -> Union[Callable[..., Any], "DecoratedOpFunction"]:
return self._compute_fn
@public
@property
def config_schema(self) -> IDefinitionConfigSchema:
"""IDefinitionConfigSchema: The config schema for this op."""
return self._config_schema
@public
@property
def required_resource_keys(self) -> AbstractSet[str]:
"""AbstractSet[str]: A set of keys for resources that must be provided to this OpDefinition."""
return frozenset(self._required_resource_keys)
@public
@deprecated(breaking_version="2.0", additional_warn_text="Use `code_version` instead.")
@property
def version(self) -> Optional[str]:
"""str: Version of the code encapsulated by the op. If set, this is used as a
default code version for all outputs.
"""
return self._version
@public
@property
def retry_policy(self) -> Optional[RetryPolicy]:
"""Optional[RetryPolicy]: The RetryPolicy for this op."""
return self._retry_policy
@public
@property
def tags(self) -> Mapping[str, str]:
"""Mapping[str, str]: The tags for this op."""
return super().tags
@public
def alias(self, name: str) -> "PendingNodeInvocation":
"""Creates a copy of this op with the given name."""
return super().alias(name)
@public
def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":
"""Creates a copy of this op with the given tags."""
return super().tag(tags)
@public
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":
"""Creates a copy of this op with the given hook definitions."""
return super().with_hooks(hook_defs)
@public
def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":
"""Creates a copy of this op with the given retry policy."""
return super().with_retry_policy(retry_policy)
@property
def pool(self) -> Optional[str]:
"""Optional[str]: The concurrency pool for this op."""
return self._pool
@property
def pools(self) -> Set[str]:
"""Optional[str]: The concurrency pools for this op node."""
return {self._pool} if self._pool else set()
def is_from_decorator(self) -> bool:
from dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction
return isinstance(self._compute_fn, DecoratedOpFunction)
def get_output_annotation(self) -> Any:
if not self.is_from_decorator():
raise DagsterInvalidInvocationError(
f"Attempted to get output annotation for {self.node_type_str} '{self.name}', "
"which was not constructed from a decorated function."
)
return cast("DecoratedOpFunction", self.compute_fn).get_output_annotation()
def all_dagster_types(self) -> Iterator[DagsterType]:
yield from self.all_input_output_types()
def iterate_node_defs(self) -> Iterator[NodeDefinition]:
yield self
def iterate_op_defs(self) -> Iterator["OpDefinition"]:
yield self
def resolve_output_to_origin(
self, output_name: str, handle: Optional[NodeHandle]
) -> tuple[OutputDefinition, Optional[NodeHandle]]:
return self.output_def_named(output_name), handle
def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition":
return self
def get_inputs_must_be_resolved_top_level(
self, asset_layer: "AssetLayer", handle: Optional[NodeHandle] = None
) -> Sequence[InputDefinition]:
handle = cast("NodeHandle", check.inst_param(handle, "handle", NodeHandle))
unresolveable_input_defs = []
for input_def in self.input_defs:
if (
not input_def.dagster_type.loader
and not input_def.dagster_type.kind == DagsterTypeKind.NOTHING
and not input_def.has_default_value
and not input_def.input_manager_key
):
input_asset_key = asset_layer.get_asset_key_for_node_input(handle, input_def.name)
# If input_asset_key is present, this input can be resolved
# by a source asset, so input does not need to be resolved
# at the top level.
if input_asset_key:
continue
unresolveable_input_defs.append(input_def)
return unresolveable_input_defs
def input_has_default(self, input_name: str) -> bool:
return self.input_def_named(input_name).has_default_value
def default_value_for_input(self, input_name: str) -> InputDefinition:
return self.input_def_named(input_name).default_value
def input_supports_dynamic_output_dep(self, input_name: str) -> bool:
return True
def with_replaced_properties(
self,
name: str,
ins: Optional[Mapping[str, In]] = None,
outs: Optional[Mapping[str, Out]] = None,
config_schema: Optional[IDefinitionConfigSchema] = None,
description: Optional[str] = None,
) -> "OpDefinition":
return OpDefinition.dagster_internal_init(
name=name,
ins={input_def.name: In.from_definition(input_def) for input_def in self.input_defs}
if ins is None
else ins,
outs={
output_def.name: Out.from_definition(output_def) for output_def in self.output_defs
}
if outs is None
else outs,
compute_fn=self.compute_fn,
config_schema=config_schema or self.config_schema,
description=description or self.description,
tags=self.tags,
required_resource_keys=self.required_resource_keys,
code_version=self._version,
retry_policy=self.retry_policy,
version=None, # code_version replaces version
pool=self.pool,
)
def copy_for_configured(
self,
name: str,
description: Optional[str],
config_schema: IDefinitionConfigSchema,
) -> "OpDefinition":
return self.with_replaced_properties(
name=name,
description=description,
config_schema=config_schema,
)
def get_resource_requirements(
self,
handle: Optional[NodeHandle],
asset_layer: Optional["AssetLayer"],
) -> Iterator[ResourceRequirement]:
node_description = f"{self.node_type_str} '{handle or self.name}'"
for resource_key in sorted(list(self.required_resource_keys)):
yield OpDefinitionResourceRequirement(
key=resource_key, node_description=node_description
)
for input_def in self.input_defs:
if input_def.input_manager_key:
yield InputManagerRequirement(
key=input_def.input_manager_key,
node_description=node_description,
input_name=input_def.name,
root_input=False,
)
elif asset_layer and handle:
input_asset_key = asset_layer.get_asset_key_for_node_input(handle, input_def.name)
if input_asset_key:
io_manager_key = (
asset_layer.get(input_asset_key).io_manager_key
if asset_layer.has(input_asset_key)
else DEFAULT_IO_MANAGER_KEY
)
yield InputManagerRequirement(
key=io_manager_key,
node_description=node_description,
input_name=input_def.name,
root_input=False,
)
for output_def in self.output_defs:
yield OutputManagerRequirement(
key=output_def.io_manager_key,
node_description=node_description,
output_name=output_def.name,
)
def resolve_input_to_destinations(
self, input_handle: NodeInputHandle
) -> Sequence[NodeInputHandle]:
return [input_handle]
def resolve_output_to_destinations(
self, output_name: str, handle: Optional[NodeHandle]
) -> Sequence[NodeInputHandle]:
return []
def __call__(self, *args, **kwargs) -> Any:
from dagster._core.definitions.composition import is_in_composition
if is_in_composition():
return super().__call__(*args, **kwargs)
return direct_invocation_result(self, *args, **kwargs)
def get_op_handles(self, parent: NodeHandle) -> AbstractSet[NodeHandle]:
return {parent}
def get_op_output_handles(self, parent: Optional[NodeHandle]) -> AbstractSet[NodeOutputHandle]:
return {
NodeOutputHandle(node_handle=parent, output_name=output_def.name)
for output_def in self.output_defs
}
def _resolve_output_defs_from_outs(
compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],
outs: Optional[Mapping[str, Out]],
default_code_version: Optional[str],
) -> Sequence[OutputDefinition]:
from dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction
if isinstance(compute_fn, DecoratedOpFunction):
inferred_output_props = infer_output_props(compute_fn.decorated_fn)
annotation = inferred_output_props.annotation
description = inferred_output_props.description
else:
inferred_output_props = None
annotation = inspect.Parameter.empty
description = None
if outs is None:
return [OutputDefinition.create_from_inferred(inferred_output_props, default_code_version)]
# If only a single entry has been provided to the out dict, then slurp the
# annotation into the entry.
if len(outs) == 1:
name = next(iter(outs.keys()))
only_out = outs[name]
return [only_out.to_definition(annotation, name, description, default_code_version)]
# If multiple outputs...
# Note: we don't provide description when using multiple outputs. Introspection
# is challenging when faced with multiple outputs.
# ... and no annotation, use empty for each output annotation
if annotation == inspect.Parameter.empty:
return [
out.to_definition(
annotation_type=inspect.Parameter.empty,
name=name,
description=None,
code_version=default_code_version,
)
for (name, out) in outs.items()
]
# ... or if a single result object type, use None for each output annotation
if _is_result_object_type(annotation):
# this can happen for example when there are outputs for checks
# that get reported via a singular MaterializeResult
return [
out.to_definition(
annotation_type=type(None),
name=name,
description=None,
code_version=default_code_version,
)
for (name, out) in outs.items()
]
# ... otherwise we expect to have a tuple with entries...
if get_origin(annotation) != tuple:
raise DagsterInvariantViolationError(
"Expected Tuple annotation for multiple outputs, but received non-tuple annotation."
)
subtypes = get_args(annotation)
# ... if they are all result object entries use None
if len(subtypes) > 0 and all(_is_result_object_type(t) for t in subtypes):
# the counts of subtypes and outputs may not align due to checks results
# being passed via MaterializeResult similar to above.
return [
out.to_definition(
annotation_type=type(None),
name=name,
description=None,
code_version=default_code_version,
)
for (name, out) in outs.items()
]
# ... otherwise they should align with outputs
if len(subtypes) != len(outs):
raise DagsterInvariantViolationError(
"Expected Tuple annotation to have number of entries matching the "
f"number of outputs for more than one output. Expected {len(outs)} "
f"outputs but annotation has {len(subtypes)}."
)
return [
cur_out.to_definition(
annotation_type=subtypes[idx],
name=name,
description=None,
code_version=default_code_version,
)
for idx, (name, cur_out) in enumerate(outs.items())
]
def _validate_context_type_hint(fn):
from inspect import _empty as EmptyAnnotation
from dagster._core.decorator_utils import get_function_params
from dagster._core.definitions.decorators.op_decorator import is_context_provided
from dagster._core.execution.context.compute import (
AssetCheckExecutionContext,
AssetExecutionContext,
OpExecutionContext,
)
params = get_function_params(fn)
if is_context_provided(params):
if params[0].annotation not in [
AssetExecutionContext,
OpExecutionContext,
EmptyAnnotation,
AssetCheckExecutionContext,
]:
raise DagsterInvalidDefinitionError(
f"Cannot annotate `context` parameter with type {params[0].annotation}. `context`"
" must be annotated with AssetExecutionContext, AssetCheckExecutionContext, OpExecutionContext, or left blank."
)
def _is_result_object_type(ttype):
# Is this type special result object type
return ttype in (MaterializeResult, ObserveResult, AssetCheckResult)
VALID_POOL_NAME_REGEX_STR = r"^[A-Za-z0-9_\/]+$" # standard name regex with slashes
VALID_POOL_NAME_REGEX = re.compile(VALID_POOL_NAME_REGEX_STR)
def _validate_pool(pool, tags):
check.opt_str_param(pool, "pool")
if not pool:
return None
if not VALID_POOL_NAME_REGEX.match(pool):
raise DagsterInvalidDefinitionError(
f'Pool "{pool}" is not a valid pool name. It must match the regex {VALID_POOL_NAME_REGEX_STR}.'
)
tags = check.opt_mapping_param(tags, "tags")
tag_concurrency_key = tags.get(GLOBAL_CONCURRENCY_TAG)
if tag_concurrency_key and pool != tag_concurrency_key:
raise DagsterInvalidDefinitionError(
f'Pool "{pool}" conflicts with the concurrency key tag "{tag_concurrency_key}".'
)
return pool
| OpDefinition |
python | falconry__falcon | tests/test_middleware.py | {
"start": 4558,
"end": 4692
} | class ____:
def on_get(self, req, resp, **kwargs):
resp.status = falcon.HTTP_200
resp.text = 'Test'
| TestCorsResource |
python | ipython__ipython | examples/IPython Kernel/gui/gui-qt.py | {
"start": 286,
"end": 1009
} | class ____(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 200, 80)
self.setWindowTitle('Hello World')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(10, 10, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'),
self, QtCore.SLOT('close()'))
if __name__ == '__main__':
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([])
sw = SimpleWindow()
sw.show()
try:
from IPython.lib.guisupport import start_event_loop_qt4
start_event_loop_qt4(app)
except ImportError:
app.exec_()
| SimpleWindow |
python | wandb__wandb | wandb/filesync/stats.py | {
"start": 322,
"end": 427
} | class ____(NamedTuple):
artifact: int
wandb: int
media: int
other: int
| FileCountsByCategory |
python | walkccc__LeetCode | solutions/2933. High-Access Employees/2933.py | {
"start": 0,
"end": 425
} | class ____:
def findHighAccessEmployees(self, access_times: list[list[str]]) -> list[str]:
ans = set()
access_times.sort()
for i in range(len(access_times) - 2):
name = access_times[i][0]
if name in ans:
continue
if name != access_times[i + 2][0]:
continue
if int(access_times[i + 2][1]) - int(access_times[i][1]) < 100:
ans.add(name)
return list(ans)
| Solution |
python | huggingface__transformers | src/transformers/models/pegasus/tokenization_pegasus.py | {
"start": 1013,
"end": 7244
} | class ____(TokenizersBackend):
r"""
Construct a PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://huggingface.co/papers/1912.08777).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://huggingface.co/papers/1912.08777).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
<unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
that uses the tokens 2 - 104 only for pretraining
offset (`int`, *optional*, defaults to 103):
Offset for additional special tokens.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, a blank vocabulary is initialized.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
pad_token="<pad>",
eos_token="</s>",
unk_token="<unk>",
mask_token="<mask_2>",
mask_token_sent="<mask_1>",
additional_special_tokens=None,
offset=103,
vocab=None,
vocab_file=None,
**kwargs,
):
self.offset = offset
self.vocab_file = vocab_file
if additional_special_tokens is None:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
if vocab is not None:
# For Pegasus, insert special tokens at the beginning
special_tokens_set = {pad_token, eos_token, mask_token_sent, mask_token, unk_token}
special_tokens_set.update(additional_special_tokens)
# Build special tokens in correct order
_vocab_list = [
(str(pad_token), 0.0),
(str(eos_token), 0.0),
]
if mask_token_sent:
_vocab_list.append((str(mask_token_sent), 0.0))
for token in additional_special_tokens:
if token not in [pad_token, eos_token, mask_token_sent]:
_vocab_list.append((str(token), 0.0))
if mask_token not in [t for t, _ in _vocab_list]:
_vocab_list.append((str(mask_token), 0.0))
_vocab_list.append((str(unk_token), 0.0))
# Filter out special tokens from main vocab and combine
filtered_vocab = [(t, s) for t, s in vocab if t not in special_tokens_set]
_vocab_list = _vocab_list + filtered_vocab
else:
_vocab_list = [(str(unk_token), 0.0)]
self._vocab = {token: idx for idx, (token, _) in enumerate(_vocab_list)}
self._tokenizer = Tokenizer(Unigram(vocab=_vocab_list, unk_id=self._vocab.get(str(unk_token), 0)))
self._tokenizer.normalizer = normalizers.Sequence(
[normalizers.Replace(Regex(r"\n"), " "), normalizers.Replace(Regex(r" {2,}"), " ")]
)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"$A {eos_token}",
pair=f"$A $B {eos_token}",
special_tokens=[(str(eos_token), self._vocab.get(str(eos_token), 1))],
)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
pad_token=pad_token,
eos_token=eos_token,
unk_token=unk_token,
mask_token=mask_token,
mask_token_sent=mask_token_sent,
offset=offset,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always", split=True)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="always", split=True)
__all__ = ["PegasusTokenizer"]
| PegasusTokenizer |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/merge.py | {
"start": 14024,
"end": 19608
} | class ____(_Merge):
"""Layer that concatenates a list of inputs.
It takes as input a list of tensors, all of the same shape except
for the concatenation axis, and returns a single tensor that is the
concatenation of all inputs.
>>> x = np.arange(20).reshape(2, 2, 5)
>>> print(x)
[[[ 0 1 2 3 4]
[ 5 6 7 8 9]]
[[10 11 12 13 14]
[15 16 17 18 19]]]
>>> y = np.arange(20, 30).reshape(2, 1, 5)
>>> print(y)
[[[20 21 22 23 24]]
[[25 26 27 28 29]]]
>>> tf.keras.layers.Concatenate(axis=1)([x, y])
<tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24]],
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[25, 26, 27, 28, 29]]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> concatted = tf.keras.layers.Concatenate()([x1, x2])
>>> concatted.shape
TensorShape([5, 16])
"""
def __init__(self, axis=-1, **kwargs):
"""Instantiates a Concatenate layer.
>>> x = np.arange(20).reshape(2, 2, 5)
>>> print(x)
[[[ 0 1 2 3 4]
[ 5 6 7 8 9]]
[[10 11 12 13 14]
[15 16 17 18 19]]]
>>> y = np.arange(20, 30).reshape(2, 1, 5)
>>> print(y)
[[[20 21 22 23 24]]
[[25 26 27 28 29]]]
>>> tf.keras.layers.Concatenate(axis=1)([x, y])
<tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24]],
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[25, 26, 27, 28, 29]]])>
Args:
axis: Axis along which to concatenate.
**kwargs: standard layer keyword arguments.
"""
super(Concatenate, self).__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self._reshape_required = False
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape[0], tuple) or len(input_shape) < 1:
raise ValueError('A `Concatenate` layer should be called '
'on a list of at least 1 input.')
if all(shape is None for shape in input_shape):
return
reduced_inputs_shapes = [list(shape) for shape in input_shape]
shape_set = set()
for i in range(len(reduced_inputs_shapes)):
del reduced_inputs_shapes[i][self.axis]
shape_set.add(tuple(reduced_inputs_shapes[i]))
if len(shape_set) != 1:
err_msg = ('A `Concatenate` layer requires inputs with matching shapes '
'except for the concat axis. Got inputs shapes: %s' %
input_shape)
# Make sure all the shapes have same ranks.
ranks = set(len(shape) for shape in shape_set)
if len(ranks) != 1:
raise ValueError(err_msg)
# Get the only rank for the set.
(rank,) = ranks
for axis in range(rank):
# Skip the Nones in the shape since they are dynamic, also the axis for
# concat has been removed above.
unique_dims = set(
shape[axis] for shape in shape_set if shape[axis] is not None)
if len(unique_dims) > 1:
raise ValueError(err_msg)
def _merge_function(self, inputs):
return backend.concatenate(inputs, axis=self.axis)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if ((not isinstance(input_shape, (tuple, list))) or
(not isinstance(input_shape[0], (tuple, list)))):
# The tf_utils.shape_type_conversion decorator turns tensorshapes
# into tuples, so we need to verify that `input_shape` is a list/tuple,
# *and* that the individual elements are themselves shape tuples.
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
output_shape = list(input_shapes[0])
for shape in input_shapes[1:]:
if output_shape[self.axis] is None or shape[self.axis] is None:
output_shape[self.axis] = None
break
output_shape[self.axis] += shape[self.axis]
return tuple(output_shape)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, (tuple, list)):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, (tuple, list)):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all(m is None for m in mask):
return None
# Make a list of masks while making sure
# the dimensionality of each mask
# is the same as the corresponding input.
masks = []
for input_i, mask_i in zip(inputs, mask):
if mask_i is None:
# Input is unmasked. Append all 1s to masks,
masks.append(array_ops.ones_like(input_i, dtype='bool'))
elif backend.ndim(mask_i) < backend.ndim(input_i):
# Mask is smaller than the input, expand it
masks.append(array_ops.expand_dims(mask_i, axis=-1))
else:
masks.append(mask_i)
concatenated = backend.concatenate(masks, axis=self.axis)
return backend.all(concatenated, axis=-1, keepdims=False)
def get_config(self):
config = {
'axis': self.axis,
}
base_config = super(Concatenate, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Concatenate |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/xaxis/_tickformatstop.py | {
"start": 235,
"end": 8527
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.xaxis"
_path_str = "layout.scene.xaxis.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.scene.x
axis.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.xaxis.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.xaxis.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | ansible__ansible | test/units/module_utils/facts/test_facts.py | {
"start": 3185,
"end": 3379
} | class ____(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = hardware.freebsd.FreeBSDHardware
collector_class = hardware.freebsd.FreeBSDHardwareCollector
| TestFreeBSDHardware |
python | davidhalter__jedi | test/completion/comprehensions.py | {
"start": 2257,
"end": 3779
} | class ____():
def __init__(self, bar):
self.bar = bar
def foo(self):
x = [a for a in self.bar][0]
#? int()
x
return x
#? int()
X([1]).foo()
# -----------------
# dict comprehensions
# -----------------
#? int()
list({a - 1: 3 for a in [1]})[0]
d = {a - 1: b for a, b in {1: 'a', 3: 1.0}.items()}
#? int()
list(d)[0]
#? str() float()
d.values()[0]
#? str()
d[0]
#? float() str()
d[1]
#? float()
d[2]
# -----------------
# set comprehensions
# -----------------
#? set()
{a - 1 for a in [1]}
#? set()
{a for a in range(10)}
#? int()
[x for x in {a for a in range(10)}][0]
#? int()
{a for a in range(10)}.pop()
#? float() str()
{b for a in [[3.0], ['']] for b in a}.pop()
#? int()
next(iter({a for a in range(10)}))
#? int()
[a for a in {1, 2, 3}][0]
# -----------------
# syntax errors
# -----------------
# Issue #1146
#? ['list']
[int(str(x.value) for x in list
def reset_missing_bracket(): pass
# -----------------
# function calls
# -----------------
def foo(arg):
return arg
x = foo(x for x in [1])
#? int()
next(x)
#?
x[0]
# While it's illegal to have more than one argument, when a generator
# expression is involved, it's still a valid parse tree and Jedi should still
# work (and especially not raise Exceptions). It's debatable wheter inferring
# values for invalid statements is a good idea, but not failing is a must.
#? int()
next(foo(x for x in [1], 1))
def bar(x, y):
return y
#? str()
next(bar(x for x in [1], x for x in ['']))
| X |
python | html5lib__html5lib-python | html5lib/html5parser.py | {
"start": 83142,
"end": 86414
} | class ____(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
__slots__ = tuple()
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
startTagHandler = _utils.MethodDispatcher([
("html", Phase.startTagHtml),
(("td", "th"), startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), startTagTableOther)
])
startTagHandler.default = startTagOther
endTagHandler = _utils.MethodDispatcher([
("tr", endTagTr),
("table", endTagTable),
(("tbody", "tfoot", "thead"), endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
endTagIgnore)
])
endTagHandler.default = endTagOther
| InRowPhase |
python | ApeWorX__ape | src/ape_networks/config.py | {
"start": 962,
"end": 1116
} | class ____(PluginConfig):
custom: list[CustomNetwork] = []
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_NETWORKS_")
| NetworksConfig |
python | wandb__wandb | tests/system_tests/test_artifacts/test_wandb_artifacts.py | {
"start": 15106,
"end": 66484
} | class ____:
@pytest.fixture
def run(self, user) -> Iterator[wandb.Run]:
with wandb.init() as run:
yield run
@pytest.fixture
def orig_data(self) -> str:
"""The contents of the original file."""
return "hello"
@pytest.fixture
def orig_fpath(self, tmp_path_factory) -> Path:
"""The path to the original file."""
# Use a factory to generate unique filepaths per test
return tmp_path_factory.mktemp("orig_path") / "file1.txt"
@pytest.fixture
def orig_artifact(self, orig_fpath, orig_data, artifact, run) -> Artifact:
"""The original, logged artifact in the sequence collection."""
file_path = orig_fpath
file_path.write_text(orig_data)
# Create the reference artifact and log it while bypassing the checksum
artifact.add_reference(file_path.as_uri(), checksum=False)
logged_artifact = run.log_artifact(artifact)
logged_artifact.wait()
# Assumption/consistency check
assert logged_artifact.version == "v0"
return logged_artifact
@pytest.fixture
def new_data(self) -> str:
"""The contents of the new file."""
return "goodbye"
@pytest.fixture
def new_fpath(self, tmp_path_factory) -> Path:
"""The path to the new file."""
return tmp_path_factory.mktemp("new_path") / "file2.txt"
@pytest.fixture
def new_artifact(self, orig_artifact) -> Artifact:
"""A new artifact with the same name and type, but not yet logged."""
return wandb.Artifact(orig_artifact.name.split(":")[0], type=orig_artifact.type)
def test_adding_ref_with_same_uri_and_same_data_creates_no_new_version(
self, run, orig_fpath, orig_data, orig_artifact, new_artifact
):
fpath = orig_fpath
fpath.write_text(orig_data)
# Create the second reference artifact and log it
new_artifact.add_reference(fpath.as_uri(), checksum=False)
new_artifact = run.log_artifact(new_artifact)
new_artifact.wait()
assert new_artifact.version == orig_artifact.version
def test_adding_ref_with_same_uri_and_new_data_creates_no_new_version(
self, run, orig_fpath, new_data, orig_artifact, new_artifact
):
# Keep the original filepath, but overwrite its contents
fpath = orig_fpath
fpath.write_text(new_data)
# Create the second reference artifact and log it
new_artifact.add_reference(fpath.as_uri(), checksum=False)
new_artifact = run.log_artifact(new_artifact)
new_artifact.wait()
assert new_artifact.version == orig_artifact.version
def test_adding_ref_with_new_uri_and_same_data_creates_new_version(
self, run, new_fpath, orig_data, orig_artifact, new_artifact
):
# Keep the original filepath, but overwrite its contents
fpath = new_fpath
fpath.write_text(orig_data)
# Create the second reference artifact and log it
new_artifact.add_reference(fpath.as_uri(), checksum=False)
new_artifact = run.log_artifact(new_artifact)
new_artifact.wait()
assert new_artifact.version != orig_artifact.version
def test_adding_ref_with_new_uri_and_new_data_creates_new_version(
self, run, new_fpath, new_data, orig_artifact, new_artifact
):
fpath = new_fpath
fpath.write_text(new_data)
# Create the second reference artifact and log it
new_artifact.add_reference(fpath.as_uri(), checksum=False)
new_artifact = run.log_artifact(new_artifact)
new_artifact.wait()
assert new_artifact.version != orig_artifact.version
def test_add_reference_local_dir(artifact):
Path("file1.txt").write_text("hello")
os.mkdir("nest")
Path("nest/file2.txt").write_text("my")
os.mkdir("nest/nest")
Path("nest/nest/file3.txt").write_text("dude")
artifact.add_reference(f"file://{os.getcwd()}")
assert artifact.digest == "72414374bfd4b0f60a116e7267845f71"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["file1.txt"] == {
"digest": "XUFAKrxLKna5cZ2REBfFkg==",
"ref": "file://" + os.path.join(os.getcwd(), "file1.txt"),
"size": 5,
}
assert manifest["contents"]["nest/file2.txt"] == {
"digest": "aGTzidmHZDa8h3j/Bx0bbA==",
"ref": "file://" + os.path.join(os.getcwd(), "nest", "file2.txt"),
"size": 2,
}
assert manifest["contents"]["nest/nest/file3.txt"] == {
"digest": "E7c+2uhEOZC+GqjxpIO8Jw==",
"ref": "file://" + os.path.join(os.getcwd(), "nest", "nest", "file3.txt"),
"size": 4,
}
def test_add_reference_local_dir_no_checksum(artifact):
path_1 = Path("file1.txt")
path_1.parent.mkdir(parents=True, exist_ok=True)
path_1.write_text("hello")
size_1 = path_1.stat().st_size
uri_1 = path_1.resolve().as_uri()
path_2 = Path("nest/file2.txt")
path_2.parent.mkdir(parents=True, exist_ok=True)
path_2.write_text("my")
size_2 = path_2.stat().st_size
uri_2 = path_2.resolve().as_uri()
path_3 = Path("nest", "nest", "file3.txt")
path_3.parent.mkdir(parents=True, exist_ok=True)
path_3.write_text("dude")
size_3 = path_3.stat().st_size
uri_3 = path_3.resolve().as_uri()
here = Path.cwd()
root_uri = here.resolve().as_uri()
artifact.add_reference(root_uri, checksum=False)
expected_entry_digest_1 = md5_string(uri_1)
expected_entry_digest_2 = md5_string(uri_2)
expected_entry_digest_3 = md5_string(uri_3)
# With checksum=False, the artifact digest will depend on its files'
# absolute paths. The working test directory isn't fixed from run
# to run, so there isn't much benefit in asserting on the exact hash here.
# The following are just some basic consistency/sanity checks.
assert isinstance(artifact.digest, str)
assert len(artifact.digest) == 32
assert int(artifact.digest, 16) != 0 # nonzero hexadecimal literal
assert artifact.digest != expected_entry_digest_1
assert artifact.digest != expected_entry_digest_2
assert artifact.digest != expected_entry_digest_3
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["file1.txt"] == {
"digest": expected_entry_digest_1,
"ref": uri_1,
"size": size_1,
}
assert manifest["contents"]["nest/file2.txt"] == {
"digest": expected_entry_digest_2,
"ref": uri_2,
"size": size_2,
}
assert manifest["contents"]["nest/nest/file3.txt"] == {
"digest": expected_entry_digest_3,
"ref": uri_3,
"size": size_3,
}
def test_add_reference_local_dir_with_name(artifact):
Path("file1.txt").write_text("hello")
Path("nest").mkdir(parents=True, exist_ok=True)
Path("nest/file2.txt").write_text("my")
Path("nest/nest").mkdir(parents=True, exist_ok=True)
Path("nest/nest/file3.txt").write_text("dude")
here = Path.cwd()
artifact.add_reference(f"file://{here!s}", name="top")
assert artifact.digest == "f718baf2d4c910dc6ccd0d9c586fa00f"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["top/file1.txt"] == {
"digest": "XUFAKrxLKna5cZ2REBfFkg==",
"ref": f"file://{here!s}/file1.txt",
"size": 5,
}
assert manifest["contents"]["top/nest/file2.txt"] == {
"digest": "aGTzidmHZDa8h3j/Bx0bbA==",
"ref": f"file://{here!s}/nest/file2.txt",
"size": 2,
}
assert manifest["contents"]["top/nest/nest/file3.txt"] == {
"digest": "E7c+2uhEOZC+GqjxpIO8Jw==",
"ref": f"file://{here!s}/nest/nest/file3.txt",
"size": 4,
}
def test_add_reference_local_dir_by_uri(tmp_path, artifact):
ugly_path = tmp_path / "i=D" / "has !@#$%^&[]()|',`~ awful taste in file names"
ugly_path.mkdir(parents=True)
file = ugly_path / "file.txt"
file.write_text("sorry")
artifact.add_reference(ugly_path.as_uri())
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["file.txt"] == {
"digest": "c88OOIlx7k7DTo2u3Q02zA==",
"ref": file.as_uri(),
"size": 5,
}
def test_add_s3_reference_object(artifact):
mock_boto(artifact)
artifact.add_reference("s3://my-bucket/my_object.pb")
assert artifact.digest == "8aec0d6978da8c2b0bf5662b3fd043a4"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_object.pb",
"extra": {"etag": "1234567890abcde", "versionID": "1"},
"size": 10,
}
def test_add_s3_reference_object_directory(artifact):
mock_boto(artifact, path=True)
artifact.add_reference("s3://my-bucket/my_dir/")
assert artifact.digest == "17955d00a20e1074c3bc96c74b724bfe"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_dir",
"extra": {"etag": "1234567890abcde", "versionID": "1"},
"size": 10,
}
def test_add_s3_reference_object_no_version(artifact):
mock_boto(artifact, version_id=None)
artifact.add_reference("s3://my-bucket/my_object.pb")
assert artifact.digest == "8aec0d6978da8c2b0bf5662b3fd043a4"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_object.pb",
"extra": {"etag": "1234567890abcde"},
"size": 10,
}
def test_add_s3_reference_object_with_version(artifact):
mock_boto(artifact)
artifact.add_reference("s3://my-bucket/my_object.pb?versionId=2")
assert artifact.digest == "8aec0d6978da8c2b0bf5662b3fd043a4"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_object.pb",
"extra": {"etag": "1234567890abcde", "versionID": "2"},
"size": 10,
}
def test_add_s3_reference_object_with_name(artifact):
mock_boto(artifact)
artifact.add_reference("s3://my-bucket/my_object.pb", name="renamed.pb")
assert artifact.digest == "bd85fe009dc9e408a5ed9b55c95f47b2"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["renamed.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_object.pb",
"extra": {"etag": "1234567890abcde", "versionID": "1"},
"size": 10,
}
def test_add_s3_reference_path(runner, capsys, artifact):
mock_boto(artifact, path=True)
artifact.add_reference("s3://my-bucket/")
assert artifact.digest == "17955d00a20e1074c3bc96c74b724bfe"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_object.pb",
"extra": {"etag": "1234567890abcde", "versionID": "1"},
"size": 10,
}
_, err = capsys.readouterr()
assert "Generating checksum" in err
def test_add_s3_reference_path_with_content_type(runner, capsys, artifact):
with runner.isolated_filesystem():
mock_boto(artifact, path=False, content_type="application/x-directory")
artifact.add_reference("s3://my-bucket/my_dir")
assert artifact.digest == "17955d00a20e1074c3bc96c74b724bfe"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "s3://my-bucket/my_dir",
"extra": {"etag": "1234567890abcde", "versionID": "1"},
"size": 10,
}
_, err = capsys.readouterr()
assert "Generating checksum" in err
def test_add_s3_max_objects(artifact):
mock_boto(artifact, path=True)
with pytest.raises(ValueError):
artifact.add_reference("s3://my-bucket/", max_objects=1)
def test_add_reference_s3_no_checksum(artifact):
Path("file1.txt").write_text("hello")
mock_boto(artifact)
# TODO: Should we require name in this case?
artifact.add_reference("s3://my_bucket/file1.txt", checksum=False)
assert artifact.digest == "52631787ed3579325f985dc0f2374040"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["file1.txt"] == {
"digest": "s3://my_bucket/file1.txt",
"ref": "s3://my_bucket/file1.txt",
}
def test_add_gs_reference_object(artifact):
mock_gcs(artifact)
artifact.add_reference("gs://my-bucket/my_object.pb")
assert artifact.digest == "8aec0d6978da8c2b0bf5662b3fd043a4"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "gs://my-bucket/my_object.pb",
"extra": {"versionID": "1"},
"size": 10,
}
def test_load_gs_reference_object_without_generation_and_mismatched_etag(
artifact,
):
mock_gcs(artifact)
artifact.add_reference("gs://my-bucket/my_object.pb")
artifact._state = ArtifactState.COMMITTED
entry = artifact.get_entry("my_object.pb")
entry.extra = {}
entry.digest = "abad0"
with pytest.raises(ValueError, match="Digest mismatch"):
entry.download()
def test_add_gs_reference_object_with_version(artifact):
mock_gcs(artifact)
artifact.add_reference("gs://my-bucket/my_object.pb#2")
assert artifact.digest == "8aec0d6978da8c2b0bf5662b3fd043a4"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "gs://my-bucket/my_object.pb",
"extra": {"versionID": "2"},
"size": 10,
}
def test_add_gs_reference_object_with_name(artifact):
mock_gcs(artifact)
artifact.add_reference("gs://my-bucket/my_object.pb", name="renamed.pb")
assert artifact.digest == "bd85fe009dc9e408a5ed9b55c95f47b2"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["renamed.pb"] == {
"digest": "1234567890abcde",
"ref": "gs://my-bucket/my_object.pb",
"extra": {"versionID": "1"},
"size": 10,
}
def test_add_gs_reference_path(runner, capsys, artifact):
with runner.isolated_filesystem():
mock_gcs(artifact, path=True)
artifact.add_reference("gs://my-bucket/")
assert artifact.digest == "17955d00a20e1074c3bc96c74b724bfe"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "gs://my-bucket/my_object.pb",
"extra": {"versionID": "1"},
"size": 10,
}
_, err = capsys.readouterr()
assert "Generating checksum" in err
def test_add_gs_reference_object_no_md5(artifact):
mock_gcs(artifact, hash=False)
artifact.add_reference("gs://my-bucket/my_object.pb")
assert artifact.digest == "8aec0d6978da8c2b0bf5662b3fd043a4"
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_object.pb"] == {
"digest": "1234567890abcde",
"ref": "gs://my-bucket/my_object.pb",
"extra": {"versionID": "1"},
"size": 10,
}
def test_add_gs_reference_with_dir_paths(artifact):
mock_gcs(artifact, override_blob_name="my_folder/")
artifact.add_reference("gs://my-bucket/my_folder/")
# uploading a reference to a folder path should add entries for
# everything returned by the list_blobs call
assert len(artifact.manifest.entries) == 1
manifest = artifact.manifest.to_manifest_json()
assert manifest["contents"]["my_other_object.pb"] == {
"digest": "1234567890abcde",
"ref": "gs://my-bucket/my_folder/my_other_object.pb",
"extra": {"versionID": "1"},
"size": 10,
}
def test_load_gs_reference_with_dir_paths(artifact):
mock = mock_gcs(artifact, override_blob_name="my_folder/")
artifact.add_reference("gs://my-bucket/my_folder/")
gcs_handler = GCSHandler()
gcs_handler._client = mock
# simple case where ref ends with "/"
simple_entry = ArtifactManifestEntry(
path="my-bucket/my_folder",
ref="gs://my-bucket/my_folder/",
digest="1234567890abcde",
size=0,
extra={"versionID": 1},
)
with pytest.raises(_GCSIsADirectoryError):
gcs_handler.load_path(simple_entry, local=True)
# case where we didn't store "/" and have to use get_blob
entry = ArtifactManifestEntry(
path="my-bucket/my_folder",
ref="gs://my-bucket/my_folder",
digest="1234567890abcde",
size=0,
extra={"versionID": 1},
)
with pytest.raises(_GCSIsADirectoryError):
gcs_handler.load_path(entry, local=True)
@pytest.fixture
def my_artifact() -> Artifact:
"""A test artifact with a custom type."""
return Artifact("my_artifact", type="my_type")
@pytest.mark.parametrize("name", [None, "my-name"])
@pytest.mark.parametrize("version_id", [None, "v2"])
def test_add_azure_reference_no_checksum(
mock_azure_handler, my_artifact, name, version_id
):
uri = "https://myaccount.blob.core.windows.net/my-container/nonexistent-blob"
if version_id and name:
entries = my_artifact.add_reference(
f"{uri}?versionId={version_id}", name=name, checksum=False
)
elif version_id and not name:
entries = my_artifact.add_reference(
f"{uri}?versionId={version_id}", checksum=False
)
elif (not version_id) and name:
entries = my_artifact.add_reference(uri, name=name, checksum=False)
else:
entries = my_artifact.add_reference(uri, checksum=False)
assert len(entries) == 1
entry = entries[0]
assert entry.path == "nonexistent-blob" if (name is None) else name
assert entry.ref == uri
assert entry.digest == uri
assert entry.size is None
assert entry.extra == {}
@pytest.mark.parametrize("name", [None, "my-name"])
@pytest.mark.parametrize("version_id", [None, "v2"])
def test_add_azure_reference(mock_azure_handler, my_artifact, name, version_id):
uri = "https://myaccount.blob.core.windows.net/my-container/my-blob"
if version_id and name:
entries = my_artifact.add_reference(f"{uri}?versionId={version_id}", name=name)
elif version_id and not name:
entries = my_artifact.add_reference(f"{uri}?versionId={version_id}")
elif (not version_id) and name:
entries = my_artifact.add_reference(uri, name=name)
else:
entries = my_artifact.add_reference(uri)
assert len(entries) == 1
entry = entries[0]
if name is None:
assert entry.path == "my-blob"
else:
assert entry.path == name
if version_id is None:
assert entry.digest == "my-blob version None"
assert entry.extra == {"etag": "my-blob version None"}
else:
assert entry.digest == f"my-blob version {version_id}"
assert entry.extra == {
"etag": f"my-blob version {version_id}",
"versionID": version_id,
}
assert entry.ref == uri
assert entry.size == 42
def test_add_azure_reference_directory(mock_azure_handler):
    """Referencing an Azure 'directory' prefix adds one entry per blob."""
    base = "https://myaccount.blob.core.windows.net/my-container/my-dir"

    def check_entries(entries, path_prefix=""):
        # The mocked listing yields blob "a" then blob "b", in order.
        assert len(entries) == 2
        for entry, blob in zip(entries, ("a", "b")):
            assert entry.path == f"{path_prefix}{blob}"
            assert entry.ref == f"{base}/{blob}"
            assert entry.digest == f"my-dir/{blob} version None"
            assert entry.size == 42
            assert entry.extra == {"etag": f"my-dir/{blob} version None"}

    artifact = Artifact("my_artifact", type="my_type")
    check_entries(artifact.add_reference(base))
    # With an explicit name, entry paths are nested under that name.
    artifact = Artifact("my_artifact", type="my_type")
    check_entries(
        artifact.add_reference(base, name="my-name"), path_prefix="my-name/"
    )
def test_add_azure_reference_max_objects(mock_azure_handler):
    """max_objects=1 keeps exactly one of the two blobs under the prefix."""
    artifact = wandb.Artifact("my_artifact", type="my_type")
    entries = artifact.add_reference(
        "https://myaccount.blob.core.windows.net/my-container/my-dir",
        max_objects=1,
    )
    assert len(entries) == 1
    entry = entries[0]
    # Either blob may be the one retained; validate whichever it is.
    # BUG FIX: the original else-branch asserted against entries[1], which
    # raised IndexError whenever blob "b" was the retained entry, because
    # only a single entry exists (len(entries) == 1).
    assert entry.path in ("a", "b")
    assert entry.ref == (
        f"https://myaccount.blob.core.windows.net/my-container/my-dir/{entry.path}"
    )
    assert entry.digest == f"my-dir/{entry.path} version None"
    assert entry.size == 42
    assert entry.extra == {"etag": f"my-dir/{entry.path} version None"}
@responses.activate
def test_add_http_reference_path(artifact):
    """An HTTP reference records the unquoted ETag as digest plus size/extra."""
    # Mock the HTTP response. NOTE: Using `responses` here assumes that the
    # `requests` library is responsible for sending the HTTP request(s).
    responses.get(
        url="http://example.com/file1.txt",
        headers={
            "ETag": '"abc"',  # quoting is intentional
            "Content-Length": "256",
        },
    )
    artifact.add_reference("http://example.com/file1.txt")
    assert artifact.digest == "48237ccc050a88af9dcd869dd5a7e9f4"
    contents = artifact.manifest.to_manifest_json()["contents"]
    # The digest strips the ETag quoting; `extra` keeps it verbatim.
    assert contents["file1.txt"] == {
        "digest": "abc",
        "ref": "http://example.com/file1.txt",
        "size": 256,
        "extra": {
            "etag": '"abc"',
        },
    }
def test_add_reference_named_local_file(tmp_path, artifact):
    """A file:// reference stored under an explicit name keeps its md5 digest."""
    local_file = tmp_path / "file1.txt"
    local_file.write_text("hello")
    file_uri = local_file.as_uri()
    artifact.add_reference(file_uri, name="great-file.txt")
    assert artifact.digest == "585b9ada17797e37c9cbab391e69b8c5"
    contents = artifact.manifest.to_manifest_json()["contents"]
    assert contents["great-file.txt"] == {
        "digest": "XUFAKrxLKna5cZ2REBfFkg==",
        "ref": file_uri,
        "size": 5,
    }
def test_add_reference_unknown_handler(artifact):
    """URIs with an unrecognized scheme fall back to tracking-only entries."""
    ref_uri = "ref://example.com/somefile.txt"
    artifact.add_reference(ref_uri, name="ref")
    assert artifact.digest == "410ade94865e89ebe1f593f4379ac228"
    contents = artifact.manifest.to_manifest_json()["contents"]
    # No handler for the scheme: the digest is just the URI, no size recorded.
    assert contents["ref"] == {"digest": ref_uri, "ref": ref_uri}
@pytest.mark.parametrize("name_type", [str, Path, PurePosixPath, PureWindowsPath])
def test_remove_file(name_type, artifact):
    """Files can be removed by original or assigned name, via any path type."""
    first = Path("file1.txt")
    first.parent.mkdir(parents=True, exist_ok=True)
    first.write_text("hello")
    second = Path("file2.txt")
    second.write_text("hello")
    artifact.add_file(first)
    artifact.add_file(second, name="renamed.txt")
    # Remove one entry by its source path and one by its assigned name.
    artifact.remove(name_type(first))
    artifact.remove(name_type("renamed.txt"))
    assert artifact.manifest.entries == {}
@pytest.mark.parametrize("name_type", [str, Path, PurePosixPath, PureWindowsPath])
def test_remove_directory(name_type, artifact):
    """Removing a directory prefix drops every entry beneath it."""
    inner = Path("bar/foo")
    inner.mkdir(parents=True, exist_ok=True)
    (inner / "file1.txt").write_text("hello")
    (inner / "file2.txt").write_text("hello2")
    artifact.add_dir("bar")
    assert len(artifact.manifest.entries) == 2
    # Entries are named relative to the added dir, so the prefix is "foo".
    artifact.remove(name_type("foo"))
    assert artifact.manifest.entries == {}
def test_remove_non_existent(artifact):
    """Removing an unknown file or directory raises FileNotFoundError."""
    src = Path("baz/foo/file1.txt")
    src.parent.mkdir(parents=True, exist_ok=True)
    src.write_text("hello")
    artifact.add_dir("baz")
    # Neither a missing file name nor a missing directory prefix is removable.
    for missing in ("file1.txt", "bar/"):
        with pytest.raises(FileNotFoundError):
            artifact.remove(missing)
    # The existing entry is untouched by the failed removals.
    assert len(artifact.manifest.entries) == 1
def test_remove_manifest_entry(artifact):
    """remove() accepts a manifest entry object, not just a name."""
    [entry] = artifact.add_reference(Path(__file__).as_uri())
    artifact.remove(entry)
    assert artifact.manifest.entries == {}
def test_artifact_table_deserialize_timestamp_column():
    """Deserializing a table turns epoch-millisecond floats in a timestamp
    column into timezone-aware UTC datetimes, preserving None cells."""
    # Column typed as union(none, timestamp): the data contains a None cell.
    artifact_json = {
        "_type": "table",
        "column_types": {
            "params": {
                "type_map": {
                    "Date Time": {
                        "params": {
                            "allowed_types": [
                                {"wb_type": "none"},
                                {"wb_type": "timestamp"},
                            ]
                        },
                        "wb_type": "union",
                    },
                }
            },
            "wb_type": "typedDict",
        },
        "columns": [
            "Date Time",
        ],
        "data": [
            [
                1230800400000.0,
            ],
            [
                None,
            ],
        ],
    }
    # Column typed as a plain (non-nullable) timestamp.
    artifact_json_non_null = {
        "_type": "table",
        "column_types": {
            "params": {
                "type_map": {
                    "Date Time": {"wb_type": "timestamp"},
                }
            },
            "wb_type": "typedDict",
        },
        "columns": [
            "Date Time",
        ],
        "data": [
            [
                1230800400000.0,
            ],
            [
                1230807600000.0,
            ],
        ],
    }
    for art in (artifact_json, artifact_json_non_null):
        artifact = Artifact(name="test", type="test")
        timestamp_idx = art["columns"].index("Date Time")
        table = wandb.Table.from_json(art, artifact)
        # Each non-None millisecond value must round-trip to a UTC datetime;
        # None cells must come through unchanged.
        assert [row[timestamp_idx] for row in table.data] == [
            datetime.fromtimestamp(row[timestamp_idx] / 1000.0, tz=timezone.utc)
            if row[timestamp_idx] is not None
            else None
            for row in art["data"]
        ]
def test_add_obj_wbimage_no_classes(assets_path, artifact):
    """An image carrying masks but no class labels is rejected on add()."""
    im_path = str(assets_path("2x2.png"))
    masked_image = wandb.Image(
        im_path,
        masks={"ground_truth": {"path": im_path}},
    )
    with pytest.raises(ValueError):
        artifact.add(masked_image, "my-image")
def test_add_obj_wbimage(assets_path, artifact):
    """Adding a wandb.Image with classes writes the classes json, the media
    file, and the image-file json, yielding a stable artifact digest."""
    im_path = str(assets_path("2x2.png"))
    wb_image = wandb.Image(im_path, classes=[{"id": 0, "name": "person"}])
    artifact.add(wb_image, "my-image")
    manifest = artifact.manifest.to_manifest_json()
    # Content-addressed: digest and manifest are stable for this fixture image
    # and class list across runs.
    assert artifact.digest == "7772370e2243066215a845a34f3cc42c"
    assert manifest["contents"] == {
        "media/classes/65347c6442e21b09b198d62e080e46ce_cls.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "media/images/641e917f31888a48f546/2x2.png": {
            "digest": "L1pBeGPxG+6XVRQk4WuvdQ==",
            "size": 71,
        },
        "my-image.image-file.json": {
            "digest": "IcEgVbPW7fE1a+g577K+VQ==",
            "size": 346,
        },
    }
@pytest.mark.parametrize("overwrite", [True, False])
def test_add_obj_wbimage_again_after_edit(
    tmp_path, assets_path, copy_asset, overwrite, artifact
):
    """Re-adding an image whose source file changed updates digest/manifest
    only when overwrite=True; entry names stay the same either way."""
    orig_path1 = assets_path("test.png")
    orig_path2 = assets_path("2x2.png")
    assert filecmp.cmp(orig_path1, orig_path2) is False  # Consistency check
    im_path = tmp_path / "image.png"
    copied_path = copy_asset(orig_path1.name, im_path)
    assert im_path == copied_path  # Consistency check
    assert filecmp.cmp(orig_path1, im_path) is True  # Consistency check
    image_name = "my-image"
    wb_image = wandb.Image(str(im_path))
    artifact.add(wb_image, image_name, overwrite=overwrite)
    manifest1 = artifact.manifest.to_manifest_json()
    digest1 = artifact.digest
    manifest_contents1 = manifest1["contents"]
    assert digest1 == "2a7a8a7f29c929fe05b57983a2944fca"
    assert len(manifest_contents1) == 2
    # Modify the object, keeping the path unchanged
    copied_path = copy_asset(orig_path2.name, im_path)
    assert im_path == copied_path  # Consistency check
    assert filecmp.cmp(orig_path2, im_path) is True  # Consistency check
    wb_image = wandb.Image(str(im_path))
    artifact.add(wb_image, image_name, overwrite=overwrite)
    manifest2 = artifact.manifest.to_manifest_json()
    digest2 = artifact.digest
    manifest_contents2 = manifest2["contents"]
    # Only an overwriting add may change the digest/manifest contents.
    assert overwrite is (digest2 != digest1)
    assert overwrite is (manifest_contents2 != manifest_contents1)
    # Regardless, we should have the same file paths/names in the manifest
    assert manifest_contents1.keys() == manifest_contents2.keys()
def test_add_obj_using_brackets(assets_path, artifact):
    """`artifact[name] = obj` behaves like add(); `artifact[name]` reads are
    disallowed until the artifact has been logged."""
    im_path = str(assets_path("2x2.png"))
    wb_image = wandb.Image(im_path, classes=[{"id": 0, "name": "person"}])
    artifact["my-image"] = wb_image
    manifest = artifact.manifest.to_manifest_json()
    # Same digest/manifest as the equivalent artifact.add() call.
    assert artifact.digest == "7772370e2243066215a845a34f3cc42c"
    assert manifest["contents"] == {
        "media/classes/65347c6442e21b09b198d62e080e46ce_cls.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "media/images/641e917f31888a48f546/2x2.png": {
            "digest": "L1pBeGPxG+6XVRQk4WuvdQ==",
            "size": 71,
        },
        "my-image.image-file.json": {
            "digest": "IcEgVbPW7fE1a+g577K+VQ==",
            "size": 346,
        },
    }
    # Reading back requires a logged artifact.
    with pytest.raises(ArtifactNotLoggedError):
        _ = artifact["my-image"]
@pytest.mark.parametrize("add_duplicate", [True, False], ids=["duplicate", "unique"])
def test_duplicate_wbimage_from_file(assets_path, artifact, add_duplicate):
    """Identical image files are deduplicated inside the artifact."""
    path_one = str(assets_path("test.png"))
    path_two = str(assets_path("test2.png"))
    second_source = path_one if add_duplicate else path_two
    artifact.add(wandb.Image(path_one), "my-image_1")
    artifact.add(wandb.Image(second_source), "my-image_2")
    # Duplicates share one media file: 2 json entries + 1 image vs. 2 + 2.
    assert len(artifact.manifest.entries) == (3 if add_duplicate else 4)
def test_deduplicate_wbimage_from_array():
    """Images built from identical arrays share a single media file."""
    pixels_a = np.random.rand(300, 300, 3)
    pixels_b = np.random.rand(300, 300, 3)
    # Two distinct images: 2 json entries + 2 media files.
    artifact = Artifact(type="dataset", name="artifact")
    artifact.add(wandb.Image(pixels_a), "my-image_1")
    artifact.add(wandb.Image(pixels_b), "my-image_2")
    assert len(artifact.manifest.entries) == 4
    # Adding a third image from the same array as the first dedupes its media:
    # 3 json entries + only 2 media files.
    artifact = Artifact(type="dataset", name="artifact")
    artifact.add(wandb.Image(pixels_a), "my-image_1")
    artifact.add(wandb.Image(pixels_b), "my-image_2")
    artifact.add(wandb.Image(pixels_a), "my-image_3")  # yes, should be pixels_a
    assert len(artifact.manifest.entries) == 5
@pytest.mark.parametrize("add_duplicate", [True, False], ids=["duplicate", "unique"])
def test_deduplicate_wbimagemask_from_array(artifact, add_duplicate):
    """Identical mask arrays are stored once; distinct ones twice."""
    mask_a = np.random.randint(0, 10, (300, 300))
    mask_b = np.random.randint(0, 10, (300, 300))
    second_mask = mask_a if add_duplicate else mask_b
    artifact.add(
        data_types.ImageMask({"mask_data": mask_a}, key="test"), "my-imagemask_1"
    )
    artifact.add(
        data_types.ImageMask({"mask_data": second_mask}, key="test2"), "my-imagemask_2"
    )
    # Shared mask data collapses to a single media file.
    assert len(artifact.manifest.entries) == (3 if add_duplicate else 4)
def test_add_obj_wbimage_classes_obj(assets_path, artifact):
    """Passing a wandb.Classes object (instead of a raw list) produces the
    same manifest layout as an inline class list."""
    im_path = str(assets_path("2x2.png"))
    classes = wandb.Classes([{"id": 0, "name": "person"}])
    wb_image = wandb.Image(im_path, classes=classes)
    artifact.add(wb_image, "my-image")
    manifest = artifact.manifest.to_manifest_json()
    assert manifest["contents"] == {
        "media/classes/65347c6442e21b09b198d62e080e46ce_cls.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "media/images/641e917f31888a48f546/2x2.png": {
            "digest": "L1pBeGPxG+6XVRQk4WuvdQ==",
            "size": 71,
        },
        "my-image.image-file.json": {
            "digest": "IcEgVbPW7fE1a+g577K+VQ==",
            "size": 346,
        },
    }
def test_add_obj_wbimage_classes_obj_already_added(assets_path, artifact):
    """A Classes object added explicitly is kept under its own name AND is
    re-emitted under the media/classes path when an image references it."""
    im_path = str(assets_path("2x2.png"))
    classes = wandb.Classes([{"id": 0, "name": "person"}])
    artifact.add(classes, "my-classes")
    wb_image = wandb.Image(im_path, classes=classes)
    artifact.add(wb_image, "my-image")
    manifest = artifact.manifest.to_manifest_json()
    # Note: the classes json appears twice (same digest, different paths).
    assert manifest["contents"] == {
        "my-classes.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "media/classes/65347c6442e21b09b198d62e080e46ce_cls.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "media/images/641e917f31888a48f546/2x2.png": {
            "digest": "L1pBeGPxG+6XVRQk4WuvdQ==",
            "size": 71,
        },
        "my-image.image-file.json": {
            "digest": "IcEgVbPW7fE1a+g577K+VQ==",
            "size": 346,
        },
    }
def test_add_obj_wbimage_image_already_added(assets_path, artifact):
    """When the image file was already added via add_file(), adding the
    wandb.Image reuses that entry instead of writing a media/images copy."""
    im_path = str(assets_path("2x2.png"))
    artifact.add_file(im_path)
    wb_image = wandb.Image(im_path, classes=[{"id": 0, "name": "person"}])
    artifact.add(wb_image, "my-image")
    manifest = artifact.manifest.to_manifest_json()
    # "2x2.png" stays at its add_file() path; the image-file json (and hence
    # its digest/size) differs from the media-path variant of this test.
    assert manifest["contents"] == {
        "2x2.png": {"digest": "L1pBeGPxG+6XVRQk4WuvdQ==", "size": 71},
        "media/classes/65347c6442e21b09b198d62e080e46ce_cls.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "my-image.image-file.json": {
            "digest": "BPGPVjCBRxX6MNySpv2Rmg==",
            "size": 312,
        },
    }
def test_add_obj_wbtable_images(assets_path, artifact):
    """A table containing the same image twice stores the media file once."""
    im_path = str(assets_path("2x2.png"))
    wb_image = wandb.Image(im_path, classes=[{"id": 0, "name": "person"}])
    wb_table = wandb.Table(["examples"])
    wb_table.add_data(wb_image)
    wb_table.add_data(wb_image)
    artifact.add(wb_table, "my-table")
    manifest = artifact.manifest.to_manifest_json()
    # One classes json, one deduplicated image, one table json.
    assert manifest["contents"] == {
        "media/classes/65347c6442e21b09b198d62e080e46ce_cls.classes.json": {
            "digest": "eG00DqdCcCBqphilriLNfw==",
            "size": 64,
        },
        "media/images/641e917f31888a48f546/2x2.png": {
            "digest": "L1pBeGPxG+6XVRQk4WuvdQ==",
            "size": 71,
        },
        "my-table.table.json": {"digest": "UN1SfxHpRdt/OOy7TrjvdQ==", "size": 1315},
    }
def test_add_obj_wbtable_images_duplicate_name(assets_path, artifact):
    """Two different images sharing the basename 'img.png' get distinct
    content-hashed media directories, so neither clobbers the other."""
    img_1 = str(assets_path("2x2.png"))
    img_2 = str(assets_path("test2.png"))
    os.mkdir("dir1")
    shutil.copy(img_1, "dir1/img.png")
    os.mkdir("dir2")
    shutil.copy(img_2, "dir2/img.png")
    wb_image_1 = wandb.Image(os.path.join("dir1", "img.png"))
    wb_image_2 = wandb.Image(os.path.join("dir2", "img.png"))
    wb_table = wandb.Table(["examples"])
    wb_table.add_data(wb_image_1)
    wb_table.add_data(wb_image_2)
    artifact.add(wb_table, "my-table")
    manifest = artifact.manifest.to_manifest_json()
    # Same filename, different parent hash directories in the media path.
    assert manifest["contents"] == {
        "media/images/641e917f31888a48f546/img.png": {
            "digest": "L1pBeGPxG+6XVRQk4WuvdQ==",
            "size": 71,
        },
        "media/images/cf37c38fd1dca3aaba6e/img.png": {
            "digest": "pQVvBBgcuG+jTN0Xo97eZQ==",
            "size": 8837,
        },
        "my-table.table.json": {"digest": "rkNgqyX3yGEQ1UxM7hsGjQ==", "size": 1006},
    }
def test_add_partition_folder(artifact):
    """A PartitionedTable records its parts path in the manifest."""
    parts_dir = "dataset_parts"
    partitioned = wandb.data_types.PartitionedTable(parts_path=parts_dir)
    artifact.add(partitioned, "dataset")
    assert artifact.digest == "c6a4d80ed84fd68df380425ded894b19"
    contents = artifact.manifest.to_manifest_json()["contents"]
    assert contents["dataset.partitioned-table.json"] == {
        "digest": "uo/SjoAO+O7pcSfg+yhlDg==",
        "size": 61,
    }
@pytest.mark.parametrize(
    "headers,expected_digest",
    [
        ({"ETag": "my-etag"}, "my-etag"),
        # TODO(spencerpearson): I think this test is wrong:
        # if no etag is provided, shouldn't we hash the response body, not simply use the URL?
        (None, "https://example.com/foo.json?bar=abc"),
    ],
)
def test_http_storage_handler_uses_etag_for_digest(
    headers: Optional[Mapping[str, str]],
    expected_digest: Optional[str],
    artifact,
):
    """HTTPHandler digests an entry from the response ETag, falling back to
    the full URL (including query string) when no ETag header is present."""
    with responses.RequestsMock() as rsps, requests.Session() as session:
        rsps.add(
            "GET",
            "https://example.com/foo.json?bar=abc",
            json={"result": 1},
            headers=headers,
        )
        handler = HTTPHandler(session)
        [entry] = handler.store_path(
            artifact, "https://example.com/foo.json?bar=abc", "foo.json"
        )
        assert entry.path == "foo.json"
        assert entry.ref == "https://example.com/foo.json?bar=abc"
        assert entry.digest == expected_digest
def test_s3_storage_handler_load_path_missing_reference(monkeypatch, user, artifact):
    """Downloading an artifact whose S3 reference 404s raises FileNotFoundError
    by default (contrast with the allow_missing_references variant below)."""
    # Create an artifact that references a non-existent S3 object.
    mock_boto(artifact, version_id="")
    artifact.add_reference("s3://my-bucket/my_object.pb")
    with wandb.init(project="test") as run:
        run.log_artifact(artifact)
    artifact.wait()

    # Patch the S3 handler to return a 404 error when checking the ETag.
    def bad_request(*args, **kwargs):
        raise util.get_module("botocore").exceptions.ClientError(
            operation_name="HeadObject",
            error_response={"Error": {"Code": "404", "Message": "Not Found"}},
        )

    monkeypatch.setattr(S3Handler, "_etag_from_obj", bad_request)
    with wandb.init(project="test") as run:
        with pytest.raises(FileNotFoundError, match="Unable to find"):
            artifact.download()
def test_change_artifact_collection_type(user):
    """change_type() on a collection is visible to subsequent runs."""
    with wandb.init() as run:
        run.log_artifact(Artifact("image_data", "data"))
    with wandb.init() as run:
        fetched = run.use_artifact("image_data:latest")
        fetched.collection.change_type("lucas_type")
    with wandb.init() as run:
        assert run.use_artifact("image_data:latest").type == "lucas_type"
def test_change_artifact_collection_type_to_internal_type(user):
    """Collections cannot be retyped to a reserved internal type, either via
    the deprecated change_type() or via the type setter + save()."""
    with wandb.init() as run:
        artifact = Artifact("image_data", "data")
        run.log_artifact(artifact).wait()
    internal_type = f"{RESERVED_ARTIFACT_TYPE_PREFIX}invalid"
    collection = artifact.collection
    with wandb.init() as run:
        # test deprecated change_type errors for changing to internal type
        with pytest.raises(CommError, match="is reserved for internal use"):
            collection.change_type(internal_type)
        # test .save()
        with pytest.raises(CommError, match="is reserved for internal use"):
            collection.type = internal_type
            collection.save()
def test_change_type_of_internal_artifact_collection(user):
    """A collection that already has an internal type can never be retyped,
    via either change_type() or the type setter + save()."""
    internal_type = f"{RESERVED_ARTIFACT_TYPE_PREFIX}invalid"
    with wandb.init() as run:
        artifact = InternalArtifact("test-internal", internal_type)
        run.log_artifact(artifact).wait()
    collection = artifact.collection
    with wandb.init() as run:
        # test deprecated change_type
        with pytest.raises(
            CommError, match="is an internal type and cannot be changed"
        ):
            collection.change_type("model")
        # test .save()
        with pytest.raises(
            CommError, match="is an internal type and cannot be changed"
        ):
            collection.type = "model"
            collection.save()
@pytest.mark.parametrize(
    "invalid_name",
    [
        "a" * (NAME_MAXLEN + 1),  # Name too long
        "my/artifact",  # Invalid character(s)
    ],
)
def test_setting_invalid_artifact_collection_name(user, api, invalid_name):
    """Setting an invalid name on an existing ArtifactCollection should fail and raise an error."""
    orig_name = "valid-name"
    with wandb.init() as run:
        run.log_artifact(Artifact(orig_name, "data"))
    collection = api.artifact_collection(type_name="data", name=orig_name)
    with pytest.raises(ValueError):
        collection.name = invalid_name
    # The rejected assignment must not clobber the stored name.
    assert collection.name == orig_name
@mark.parametrize(
    "new_description",
    [
        param("", id="empty string"),
        param("New description.", id="non-empty string"),
    ],
)
def test_save_artifact_sequence(
    user: str, api: wandb.Api, new_description: Union[str, None]
):
    """Mutating a sequence collection (description, name, type, tags) and
    calling save() persists all changes and is visible via use_artifact()."""
    with wandb.init() as run:
        artifact = Artifact("sequence_name", "data")
        run.log_artifact(artifact)
        artifact.wait()
        artifact = run.use_artifact("sequence_name:latest")
        collection = api.artifact_collection("data", "sequence_name")
        collection.description = new_description
        collection.name = "new_name"
        collection.type = "new_type"
        collection.tags = ["tag"]
        collection.save()
        # The artifact is now addressable under its new collection name/type.
        artifact = run.use_artifact("new_name:latest")
        assert artifact.type == "new_type"
        collection = artifact.collection
        assert collection.type == "new_type"
        assert collection.name == "new_name"
        assert collection.description == new_description
        assert len(collection.tags) == 1 and collection.tags[0] == "tag"
        # Tags can be replaced wholesale and re-saved.
        collection.tags = ["new_tag"]
        collection.save()
        artifact = run.use_artifact("new_name:latest")
        collection = artifact.collection
        assert len(collection.tags) == 1 and collection.tags[0] == "new_tag"
def test_artifact_standard_url(user, api):
    """A logged artifact's URL points at the project's artifacts page."""
    with wandb.init() as run:
        logged = Artifact("sequence_name", "data")
        run.log_artifact(logged)
        logged.wait()
        fetched = run.use_artifact("sequence_name:latest")
        app = util.app_url(run.settings.base_url)
        assert fetched.url == (
            f"{app}/{run.entity}/{run.project}"
            f"/artifacts/data/sequence_name/{fetched.version}"
        )
def test_artifact_model_registry_url(user, api):
    """A model artifact linked into a registry gets a registry-style URL with
    a percent-encoded selectionPath query parameter."""
    with wandb.init() as run:
        artifact = wandb.Artifact("sequence_name", "model")
        run.log_artifact(artifact)
        artifact.wait()
        run.link_artifact(artifact=artifact, target_path="test_model_portfolio")
        linked_model_art = run.use_artifact(
            f"{artifact.entity}/{artifact.project}/test_model_portfolio:latest"
        )
        base_url = util.app_url(run.settings.base_url)
        # selectionPath is entity/project/collection, fully percent-encoded
        # (safe="" encodes the slashes too).
        encoded_path = f"{linked_model_art.entity}/{linked_model_art.project}/{linked_model_art.collection.name}"
        selection_path = quote(encoded_path, safe="")
        expected_url = (
            f"{base_url}/{linked_model_art.entity}/registry/model?"
            f"selectionPath={selection_path}&view=membership&version={linked_model_art.version}"
        )
        assert linked_model_art.url == expected_url
@mark.parametrize(
    "new_description",
    [
        param(None, id="null"),
        param("", id="empty string"),
        param("New description.", id="non-empty string"),
    ],
)
def test_save_artifact_portfolio(
    user: str, api: wandb.Api, new_description: Union[str, None]
):
    """Portfolio collections accept description/name/tag edits but reject
    type changes (unlike sequence collections)."""
    with wandb.init() as run:
        artifact = Artifact("image_data", "data")
        run.log_artifact(artifact)
        artifact.link("portfolio_name")
        artifact.wait()
        portfolio = api.artifact_collection("data", "portfolio_name")
        portfolio.description = new_description
        portfolio.name = "new_name"
        # A portfolio's type is immutable.
        with pytest.raises(ValueError):
            portfolio.type = "new_type"
        portfolio.tags = ["tag"]
        portfolio.save()
        port_artifact = run.use_artifact("new_name:v0")
        portfolio = port_artifact.collection
        assert portfolio.name == "new_name"
        assert portfolio.description == new_description
        assert len(portfolio.tags) == 1 and portfolio.tags[0] == "tag"
        # Tags can be replaced wholesale and re-saved.
        portfolio.tags = ["new_tag"]
        portfolio.save()
        artifact = run.use_artifact("new_name:latest")
        portfolio = artifact.collection
        assert len(portfolio.tags) == 1 and portfolio.tags[0] == "new_tag"
def test_s3_storage_handler_load_path_missing_reference_allowed(
    monkeypatch, user, capsys, artifact
):
    """With allow_missing_references=True, a 404ing S3 reference is skipped
    during download and only produces a warning on stderr."""
    # Create an artifact that references a non-existent S3 object.
    mock_boto(artifact, version_id="")
    artifact.add_reference("s3://my-bucket/my_object.pb")
    with wandb.init(project="test") as run:
        run.log_artifact(artifact)
    artifact.wait()

    # Patch the S3 handler to return a 404 error when checking the ETag.
    def bad_request(*args, **kwargs):
        raise util.get_module("botocore").exceptions.ClientError(
            operation_name="HeadObject",
            error_response={"Error": {"Code": "404", "Message": "Not Found"}},
        )

    monkeypatch.setattr(S3Handler, "_etag_from_obj", bad_request)
    with wandb.init(project="test") as run:
        artifact.download(allow_missing_references=True)

    # It should still log a warning about skipping the missing reference.
    assert "Unable to find my_object.pb" in capsys.readouterr().err
def test_s3_storage_handler_load_path_uses_cache(tmp_path):
    """load_path(local=True) serves a file already present in the etag cache."""
    uri = "s3://some-bucket/path/to/file.json"
    etag = "some etag"
    size = 123
    # Pre-populate the cache with a file of the expected etag and size.
    cache = artifact_file_cache.ArtifactFileCache(tmp_path)
    cache_path, _, opener = cache.check_etag_obj_path(uri, etag, size)
    with opener() as f:
        f.write(size * "a")
    handler = S3Handler()
    handler._cache = cache
    entry = ArtifactManifestEntry(path="foo/bar", ref=uri, digest=etag, size=size)
    # The handler must return the cached path without touching S3.
    assert handler.load_path(entry, local=True) == cache_path
def test_tracking_storage_handler(artifact):
    """TrackingHandler stores a path under its logical name, with the raw
    path as both ref and digest (no content is read or hashed)."""
    handler = TrackingHandler()
    [entry] = handler.store_path(artifact, path="/path/to/file.txt", name="some-file")
    assert entry.path == "some-file"
    assert entry.ref == "/path/to/file.txt"
    assert entry.digest == entry.ref

    # TODO(spencerpearson): THIS TEST IS BROKEN. I'm pretty sure.
    # I'm commenting it out rather than fixing it because this commit should be a no-op.
    #
    # Empirically, this test fails with:
    #   AssertionError: assert 'some-file' == '/path/to/file.txt'
    # But 'some-file' started out as a `name`, i.e. a LogicalPath,
    # representing the location of the file *within the artifact*
    # rather than *on the filesystem*.
    #
    # assert handler.load_path(entry) == "/path/to/file.txt"
def test_manifest_json_version():
    """The proto-to-json conversion preserves the manifest version field."""
    proto_manifest = wandb.proto.wandb_internal_pb2.ArtifactManifest()
    proto_manifest.version = 1
    json_manifest = wandb.sdk.internal.sender._manifest_json_from_proto(proto_manifest)
    assert json_manifest["version"] == 1
@pytest.mark.parametrize("version", ["1", 1.0])
def test_manifest_version_is_integer(version):
    """Protobuf rejects non-integer manifest versions at assignment time."""
    proto_manifest = wandb.proto.wandb_internal_pb2.ArtifactManifest()
    with pytest.raises(TypeError):
        proto_manifest.version = version
@pytest.mark.parametrize("version", [0, 2])
def test_manifest_json_invalid_version(version):
    """Unsupported manifest versions are rejected during proto-to-json conversion."""
    proto_manifest = wandb.proto.wandb_internal_pb2.ArtifactManifest()
    proto_manifest.version = version
    with pytest.raises(Exception) as excinfo:
        wandb.sdk.internal.sender._manifest_json_from_proto(proto_manifest)
    assert "manifest version" in str(excinfo.value)
@pytest.mark.flaky
@pytest.mark.xfail(reason="flaky")
def test_cache_cleanup_allows_upload(user, tmp_path, monkeypatch, artifact):
    """Uploading must not depend on the cache: a file added to an artifact can
    be deleted from disk and still upload, after which it lands in the cache."""
    monkeypatch.setenv("WANDB_CACHE_DIR", str(tmp_path))
    cache = artifact_file_cache.get_artifact_file_cache()

    # Write a 1 MiB file and fsync so its size is stable on disk.
    with open("test-file", "wb") as f:
        f.truncate(2**20)
        f.flush()
        os.fsync(f)
    artifact.add_file("test-file")

    # We haven't cached it and can't reclaim its bytes.
    assert cache.cleanup(0) == 0

    # Deleting the file also shouldn't interfere with the upload.
    os.remove("test-file")

    # We're still able to upload the artifact.
    with wandb.init() as run:
        run.log_artifact(artifact)
        artifact.wait()

    manifest_entry = artifact.manifest.entries["test-file"]
    _, found, _ = cache.check_md5_obj_path(manifest_entry.digest, 2**20)

    # Now the file should be in the cache.
    # Even though this works in production, the test often fails. I don't know why :(.
    assert found
    assert cache.cleanup(0) == 2**20
def test_artifact_ttl_setter_getter():
    """TTL state transitions: unset, explicit None, INHERIT, timedelta, invalid."""
    # Fresh artifact: ttl unreadable until logged; inherited by default.
    art = Artifact("test", type="test")
    with pytest.raises(ArtifactNotLoggedError):
        _ = art.ttl
    assert art._ttl_duration_seconds is None
    assert art._ttl_changed is False
    assert art._ttl_is_inherited

    # Explicitly disabling TTL is readable immediately and marks a change.
    art = Artifact("test", type="test")
    art.ttl = None
    assert art.ttl is None
    assert art._ttl_duration_seconds is None
    assert art._ttl_changed
    assert art._ttl_is_inherited is False

    # INHERIT marks a change but keeps ttl unreadable pre-logging.
    art = Artifact("test", type="test")
    art.ttl = ArtifactTTL.INHERIT
    with pytest.raises(ArtifactNotLoggedError):
        _ = art.ttl
    assert art._ttl_duration_seconds is None
    assert art._ttl_changed
    assert art._ttl_is_inherited

    # A concrete timedelta round-trips and is stored as whole seconds.
    duration = timedelta(days=100)
    art = Artifact("test", type="test")
    art.ttl = duration
    assert art.ttl == duration
    assert art._ttl_duration_seconds == int(duration.total_seconds())
    assert art._ttl_changed
    assert art._ttl_is_inherited is False

    # Negative durations are rejected outright.
    art = Artifact("test", type="test")
    with pytest.raises(ValueError):
        art.ttl = timedelta(days=-1)
| TestAddReferenceLocalFileNoChecksumTwice |
python | langchain-ai__langchain | libs/core/langchain_core/retrievers.py | {
"start": 1536,
"end": 11125
} | class ____(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
"""Abstract base class for a document retrieval system.
A retrieval system is defined as something that can take string queries and return
the most 'relevant' documents from some source.
Usage:
A retriever follows the standard `Runnable` interface, and should be used via the
standard `Runnable` methods of `invoke`, `ainvoke`, `batch`, `abatch`.
Implementation:
When implementing a custom retriever, the class should implement the
`_get_relevant_documents` method to define the logic for retrieving documents.
Optionally, an async native implementations can be provided by overriding the
`_aget_relevant_documents` method.
!!! example "Retriever that returns the first 5 documents from a list of documents"
```python
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SimpleRetriever(BaseRetriever):
docs: list[Document]
k: int = 5
def _get_relevant_documents(self, query: str) -> list[Document]:
\"\"\"Return the first k documents from the list of documents\"\"\"
return self.docs[:self.k]
async def _aget_relevant_documents(self, query: str) -> list[Document]:
\"\"\"(Optional) async native implementation.\"\"\"
return self.docs[:self.k]
```
!!! example "Simple retriever based on a scikit-learn vectorizer"
```python
from sklearn.metrics.pairwise import cosine_similarity
class TFIDFRetriever(BaseRetriever, BaseModel):
vectorizer: Any
docs: list[Document]
tfidf_array: Any
k: int = 4
class Config:
arbitrary_types_allowed = True
def _get_relevant_documents(self, query: str) -> list[Document]:
# Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
query_vec = self.vectorizer.transform([query])
# Op -- (n_docs,1) -- Cosine Sim with each doc
results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
```
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
_new_arg_supported: bool = False
_expects_other_args: bool = False
tags: list[str] | None = None
"""Optional list of tags associated with the retriever.
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a retriever with its
use case.
"""
metadata: dict[str, Any] | None = None
"""Optional metadata associated with the retriever.
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a retriever with its
use case.
"""
@override
def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
parameters = signature(cls._get_relevant_documents).parameters
cls._new_arg_supported = parameters.get("run_manager") is not None
if (
not cls._new_arg_supported
and cls._aget_relevant_documents == BaseRetriever._aget_relevant_documents
):
# we need to tolerate no run_manager in _aget_relevant_documents signature
async def _aget_relevant_documents(
self: Self, query: str
) -> list[Document]:
return await run_in_executor(None, self._get_relevant_documents, query) # type: ignore[call-arg]
cls._aget_relevant_documents = _aget_relevant_documents # type: ignore[assignment]
# If a V1 retriever broke the interface and expects additional arguments
cls._expects_other_args = (
len(set(parameters.keys()) - {"self", "query", "run_manager"}) > 0
)
def _get_ls_params(self, **_kwargs: Any) -> LangSmithRetrieverParams:
"""Get standard params for tracing."""
default_retriever_name = self.get_name()
if default_retriever_name.startswith("Retriever"):
default_retriever_name = default_retriever_name[9:]
elif default_retriever_name.endswith("Retriever"):
default_retriever_name = default_retriever_name[:-9]
default_retriever_name = default_retriever_name.lower()
return LangSmithRetrieverParams(ls_retriever_name=default_retriever_name)
@override
def invoke(
self, input: str, config: RunnableConfig | None = None, **kwargs: Any
) -> list[Document]:
"""Invoke the retriever to get relevant documents.
Main entry point for synchronous retriever invocations.
Args:
input: The query string.
config: Configuration for the retriever.
**kwargs: Additional arguments to pass to the retriever.
Returns:
List of relevant documents.
Examples:
```python
retriever.invoke("query")
```
"""
config = ensure_config(config)
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(**kwargs),
}
callback_manager = CallbackManager.configure(
config.get("callbacks"),
None,
verbose=kwargs.get("verbose", False),
inheritable_tags=config.get("tags"),
local_tags=self.tags,
inheritable_metadata=inheritable_metadata,
local_metadata=self.metadata,
)
run_manager = callback_manager.on_retriever_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=kwargs.pop("run_id", None),
)
try:
kwargs_ = kwargs if self._expects_other_args else {}
if self._new_arg_supported:
result = self._get_relevant_documents(
input, run_manager=run_manager, **kwargs_
)
else:
result = self._get_relevant_documents(input, **kwargs_)
except Exception as e:
run_manager.on_retriever_error(e)
raise
else:
run_manager.on_retriever_end(
result,
)
return result
@override
async def ainvoke(
self,
input: str,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> list[Document]:
"""Asynchronously invoke the retriever to get relevant documents.
Main entry point for asynchronous retriever invocations.
Args:
input: The query string.
config: Configuration for the retriever.
**kwargs: Additional arguments to pass to the retriever.
Returns:
List of relevant documents.
Examples:
```python
await retriever.ainvoke("query")
```
"""
config = ensure_config(config)
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(**kwargs),
}
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
None,
verbose=kwargs.get("verbose", False),
inheritable_tags=config.get("tags"),
local_tags=self.tags,
inheritable_metadata=inheritable_metadata,
local_metadata=self.metadata,
)
run_manager = await callback_manager.on_retriever_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=kwargs.pop("run_id", None),
)
try:
kwargs_ = kwargs if self._expects_other_args else {}
if self._new_arg_supported:
result = await self._aget_relevant_documents(
input, run_manager=run_manager, **kwargs_
)
else:
result = await self._aget_relevant_documents(input, **kwargs_)
except Exception as e:
await run_manager.on_retriever_error(e)
raise
else:
await run_manager.on_retriever_end(
result,
)
return result
    @abstractmethod
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        """Get documents relevant to a query.

        Subclasses must implement this; it is the synchronous retrieval hook
        that ``invoke`` dispatches to (and, by default, the async path runs
        in an executor).

        Args:
            query: String to find relevant documents for.
            run_manager: The callback handler to use.

        Returns:
            List of relevant documents.
        """
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callback handler to use
Returns:
List of relevant documents
"""
return await run_in_executor(
None,
self._get_relevant_documents,
query,
run_manager=run_manager.get_sync(),
)
| BaseRetriever |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numerictypes.py | {
"start": 1416,
"end": 4038
} | class ____(TestCase):
# scalar types can be promoted into dtypes
wrappers = [np.dtype, lambda x: x]
def test_both_abstract(self):
assert_(np.issubdtype(np.floating, np.inexact))
assert_(not np.issubdtype(np.inexact, np.floating))
def test_same(self):
for cls in (np.float32, np.int32):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(np.issubdtype(w1(cls), w2(cls)))
def test_subclass(self):
# note we cannot promote floating to a dtype, as it would turn into a
# concrete type
for w in self.wrappers:
assert_(np.issubdtype(w(np.float32), np.floating))
assert_(np.issubdtype(w(np.float64), np.floating))
def test_subclass_backwards(self):
for w in self.wrappers:
assert_(not np.issubdtype(np.floating, w(np.float32)))
assert_(not np.issubdtype(np.floating, w(np.float64)))
def test_sibling_class(self):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
def test_nondtype_nonscalartype(self):
# See gh-14619 and gh-9505 which introduced the deprecation to fix
# this. These tests are directly taken from gh-9505
assert not np.issubdtype(np.float32, "float64")
assert not np.issubdtype(np.float32, "f8")
assert not np.issubdtype(np.int32, "int64")
# for the following the correct spellings are
# np.integer, np.floating, or np.complexfloating respectively:
assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_
assert not np.issubdtype(np.float32, float)
assert not np.issubdtype(np.complex64, complex)
assert not np.issubdtype(np.float32, "float")
assert not np.issubdtype(np.float64, "f")
# Test the same for the correct first datatype and abstract one
# in the case of int, float, complex:
assert np.issubdtype(np.float64, "float64")
assert np.issubdtype(np.float64, "f8")
assert np.issubdtype(np.int64, "int64")
assert np.issubdtype(np.int8, np.integer)
assert np.issubdtype(np.float32, np.floating)
assert np.issubdtype(np.complex64, np.complexfloating)
assert np.issubdtype(np.float64, "float")
assert np.issubdtype(np.float32, "f")
@xpassIfTorchDynamo_np # (
# reason="We do not have (or need) np.core.numerictypes."
# " Our type aliases are in _dtypes.py."
# )
| TestIsSubDType |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py | {
"start": 2800,
"end": 3060
} | class ____(Protocol):
"""Callable that generates a description for a tool call."""
def __call__(self, tool_call: ToolCall, state: AgentState, runtime: Runtime) -> str:
"""Generate a description for a tool call."""
...
| _DescriptionFactory |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.