language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ipython__ipython | tests/test_ultratb.py | {
"start": 7905,
"end": 10256
} | class ____(unittest.TestCase):
DIRECT_CAUSE_ERROR_CODE = """
try:
x = 1 + 2
print(not_defined_here)
except Exception as e:
x += 55
x - 1
y = {}
raise KeyError('uh') from e
"""
EXCEPTION_DURING_HANDLING_CODE = """
try:
x = 1 + 2
print(not_defined_here)
except Exception as e:
x += 55
x - 1
y = {}
raise KeyError('uh')
"""
SUPPRESS_CHAINING_CODE = """
try:
1/0
except Exception:
raise ValueError("Yikes") from None
"""
SYS_EXIT_WITH_CONTEXT_CODE = """
try:
1/0
except Exception as e:
raise SystemExit(1)
"""
def test_direct_cause_error(self):
with tt.AssertPrints(["KeyError", "NameError", "direct cause"]):
ip.run_cell(self.DIRECT_CAUSE_ERROR_CODE)
def test_exception_during_handling_error(self):
with tt.AssertPrints(["KeyError", "NameError", "During handling"]):
ip.run_cell(self.EXCEPTION_DURING_HANDLING_CODE)
def test_sysexit_while_handling_error(self):
with tt.AssertPrints(["SystemExit", "to see the full traceback"]):
with tt.AssertNotPrints(["another exception"], suppress=False):
ip.run_cell(self.SYS_EXIT_WITH_CONTEXT_CODE)
def test_suppress_exception_chaining(self):
with (
tt.AssertNotPrints("ZeroDivisionError"),
tt.AssertPrints("ValueError", suppress=False),
):
ip.run_cell(self.SUPPRESS_CHAINING_CODE)
def test_plain_direct_cause_error(self):
with tt.AssertPrints(["KeyError", "NameError", "direct cause"]):
ip.run_cell("%xmode Plain")
ip.run_cell(self.DIRECT_CAUSE_ERROR_CODE)
ip.run_cell("%xmode Verbose")
def test_plain_exception_during_handling_error(self):
with tt.AssertPrints(["KeyError", "NameError", "During handling"]):
ip.run_cell("%xmode Plain")
ip.run_cell(self.EXCEPTION_DURING_HANDLING_CODE)
ip.run_cell("%xmode Verbose")
def test_plain_suppress_exception_chaining(self):
with (
tt.AssertNotPrints("ZeroDivisionError"),
tt.AssertPrints("ValueError", suppress=False),
):
ip.run_cell("%xmode Plain")
ip.run_cell(self.SUPPRESS_CHAINING_CODE)
ip.run_cell("%xmode Verbose")
| Python3ChainedExceptionsTest |
python | Textualize__textual | src/textual/widgets/_tree.py | {
"start": 1533,
"end": 1671
} | class ____(Exception):
"""Exception raised when referring to an unknown [`TreeNode`][textual.widgets.tree.TreeNode] ID."""
| UnknownNodeID |
python | bokeh__bokeh | src/bokeh/document/events.py | {
"start": 4537,
"end": 4646
} | class ____:
def _column_data_changed(self, event: ColumnDataChangedEvent) -> None: ...
| ColumnDataChangedMixin |
python | apache__thrift | test/py/SerializationTest.py | {
"start": 13579,
"end": 13716
} | class ____(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False)
| AcceleratedCompactTest |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modular_sam3_tracker.py | {
"start": 3123,
"end": 3177
} | class ____(Sam2Processor):
pass
| Sam3TrackerProcessor |
python | ray-project__ray | python/ray/util/collective/util.py | {
"start": 136,
"end": 1303
} | class ____:
"""NCCLUniqueID Store as a named actor class.
Args:
name: the unique name for this named actor.
Attributes:
name: the unique name for this named actor.
nccl_id: the NCCLUniqueID held in this store.
"""
def __init__(self, name):
self.name = name
self.nccl_id = None
self.event = asyncio.Event()
async def set_id(self, uid):
"""
Initialize the NCCL unique ID for this store.
Args:
uid: the unique ID generated via the NCCL generate_communicator_id API.
Returns:
The NCCL unique ID set.
"""
self.nccl_id = uid
self.event.set()
return uid
async def wait_and_get_id(self):
"""Wait for the NCCL unique ID to be set and return it."""
await self.event.wait()
return self.nccl_id
def get_id(self):
"""Get the NCCL unique ID held in this store."""
if not self.nccl_id:
logger.warning(
"The NCCL ID has not been set yet for store {}.".format(self.name)
)
return self.nccl_id
@ray.remote
| NCCLUniqueIDStore |
python | pennersr__django-allauth | allauth/socialaccount/providers/oauth2/views.py | {
"start": 3908,
"end": 7151
} | class ____(OAuth2View):
def dispatch(self, request, *args, **kwargs):
provider = self.adapter.get_provider()
state, resp = self._get_state(request, provider)
if resp:
return resp
if "error" in request.GET or "code" not in request.GET:
# Distinguish cancel from error
auth_error = request.GET.get("error", None)
if auth_error == self.adapter.login_cancelled_error:
error = AuthError.CANCELLED
else:
error = AuthError.UNKNOWN
return render_authentication_error(
request,
provider,
error=error,
extra_context={
"state": state,
"callback_view": self,
},
)
app = provider.app
client = self.adapter.get_client(self.request, app)
try:
access_token = self.adapter.get_access_token_data(
request, app, client, pkce_code_verifier=state.get("pkce_code_verifier")
)
token = self.adapter.parse_token(access_token)
if app.pk:
token.app = app
login = self.adapter.complete_login(
request, app, token, response=access_token
)
login.token = token
login.state = state
return complete_social_login(request, login)
except (
PermissionDenied,
OAuth2Error,
RequestException,
ProviderException,
) as e:
return render_authentication_error(
request, provider, exception=e, extra_context={"state": state}
)
def _redirect_strict_samesite(self, request, provider):
if (
"_redir" in request.GET
or settings.SESSION_COOKIE_SAMESITE.lower() != "strict"
or request.method != "GET"
):
return
redirect_to = request.get_full_path()
redirect_to = add_query_params(redirect_to, {"_redir": ""})
return render(
request,
"socialaccount/login_redirect." + account_settings.TEMPLATE_EXTENSION,
{
"provider": provider,
"redirect_to": redirect_to,
},
)
def _get_state(self, request, provider):
state = None
state_id = get_request_param(request, "state")
if self.adapter.supports_state:
if state_id:
state = statekit.unstash_state(request, state_id)
else:
state = statekit.unstash_last_state(request)
if state is None:
resp = self._redirect_strict_samesite(request, provider)
if resp:
# 'Strict' is in effect, let's try a redirect and then another
# shot at finding our state...
return None, resp
return None, render_authentication_error(
request,
provider,
extra_context={
"state_id": state_id,
"callback_view": self,
},
)
return state, None
| OAuth2CallbackView |
python | facebook__pyre-check | tools/upgrade/tests/configuration_test.py | {
"start": 342,
"end": 2632
} | class ____(unittest.TestCase):
mock_completed_process = MagicMock(stdout="[]")
@patch.object(subprocess, "call")
# pyre-fixme[56]: Pyre was not able to infer the type of argument `subprocess`
# to decorator factory `unittest.mock.patch.object`.
@patch.object(subprocess, "run", return_value=mock_completed_process)
def test_get_errors__no_targets(
self, run: MagicMock, buck_clean: MagicMock
) -> None:
configuration = Configuration(Path("path"), {})
configuration.get_errors()
buck_clean.assert_not_called()
run.assert_called_once()
@patch.object(subprocess, "call")
# pyre-fixme[56]: Pyre was not able to infer the type of argument `subprocess`
# to decorator factory `unittest.mock.patch.object`.
@patch.object(subprocess, "run", return_value=mock_completed_process)
def test_get_errors__targets(self, run: MagicMock, buck_clean: MagicMock) -> None:
configuration = Configuration(Path("path"), {})
configuration.targets = ["//target/..."]
configuration.get_errors()
buck_clean.assert_called_once()
run.assert_called_once()
def test_get_contents__preserve_explicit_false_options(self) -> None:
configuration = Configuration(
Path("path"), json_contents={"strict": False, "use_buck_builder": False}
)
self.assertEqual(
configuration.get_contents(), {"strict": False, "use_buck_builder": False}
)
def test_get_contents__preserve_untracked_option(self) -> None:
configuration = Configuration(Path("path"), json_contents={"foo": True})
self.assertEqual(configuration.get_contents(), {"foo": True})
def test_get_contents__include_new_attribute(self) -> None:
configuration = Configuration(Path("path"), json_contents={"strict": False})
configuration.version = "1234"
self.assertEqual(
configuration.get_contents(), {"strict": False, "version": "1234"}
)
def test_get_contents__update_existing_attribute(self) -> None:
configuration = Configuration(Path("path"), json_contents={"strict": False})
configuration.strict = True
self.assertEqual(configuration.get_contents(), {"strict": True})
| ConfigurationTest |
python | wandb__wandb | wandb/vendor/pygments/styles/trac.py | {
"start": 395,
"end": 1933
} | class ____(Style):
"""
Port of the default trac highlighter design.
"""
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #999988',
Comment.Preproc: 'bold noitalic #999999',
Comment.Special: 'bold #999999',
Operator: 'bold',
String: '#bb8844',
String.Regex: '#808000',
Number: '#009999',
Keyword: 'bold',
Keyword.Type: '#445588',
Name.Builtin: '#999999',
Name.Function: 'bold #990000',
Name.Class: 'bold #445588',
Name.Exception: 'bold #990000',
Name.Namespace: '#555555',
Name.Variable: '#008080',
Name.Constant: '#008080',
Name.Tag: '#000080',
Name.Attribute: '#008080',
Name.Entity: '#800080',
Generic.Heading: '#999999',
Generic.Subheading: '#aaaaaa',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
| TracStyle |
python | pypa__pipenv | pipenv/patched/pip/_internal/exceptions.py | {
"start": 13934,
"end": 15498
} | class ____(InstallationError):
"""
A failure to verify a package against known-good hashes
:cvar order: An int sorting hash exception classes by difficulty of
recovery (lower being harder), so the user doesn't bother fretting
about unpinned packages when he has deeper issues, like VCS
dependencies, to deal with. Also keeps error reports in a
deterministic order.
:cvar head: A section heading for display above potentially many
exceptions of this kind
:ivar req: The InstallRequirement that triggered this error. This is
pasted on after the exception is instantiated, because it's not
typically available earlier.
"""
req: Optional["InstallRequirement"] = None
head = ""
order: int = -1
def body(self) -> str:
"""Return a summary of me for display under the heading.
This default implementation simply prints a description of the
triggering requirement.
:param req: The InstallRequirement that provoked this error, with
its link already populated by the resolver's _populate_link().
"""
return f" {self._requirement_name()}"
def __str__(self) -> str:
return f"{self.head}\n{self.body()}"
def _requirement_name(self) -> str:
"""Return a description of the requirement that triggered me.
This default implementation returns long description of the req, with
line numbers
"""
return str(self.req) if self.req else "unknown package"
| HashError |
python | networkx__networkx | networkx/algorithms/tests/test_distance_measures.py | {
"start": 14590,
"end": 18237
} | class ____:
@classmethod
def setup_class(cls):
global np
np = pytest.importorskip("numpy")
sp = pytest.importorskip("scipy")
def setup_method(self):
G = nx.Graph()
G.add_edge(1, 2, weight=2)
G.add_edge(2, 3, weight=4)
G.add_edge(3, 4, weight=1)
G.add_edge(1, 4, weight=3)
self.G = G
def test_resistance_distance_directed_graph(self):
G = nx.DiGraph()
with pytest.raises(nx.NetworkXNotImplemented):
nx.resistance_distance(G)
def test_resistance_distance_empty(self):
G = nx.Graph()
with pytest.raises(nx.NetworkXError):
nx.resistance_distance(G)
def test_resistance_distance_not_connected(self):
with pytest.raises(nx.NetworkXError):
self.G.add_node(5)
nx.resistance_distance(self.G, 1, 5)
def test_resistance_distance_nodeA_not_in_graph(self):
with pytest.raises(nx.NetworkXError):
nx.resistance_distance(self.G, 9, 1)
def test_resistance_distance_nodeB_not_in_graph(self):
with pytest.raises(nx.NetworkXError):
nx.resistance_distance(self.G, 1, 9)
def test_resistance_distance(self):
rd = nx.resistance_distance(self.G, 1, 3, "weight", True)
test_data = 1 / (1 / (2 + 4) + 1 / (1 + 3))
assert round(rd, 5) == round(test_data, 5)
def test_resistance_distance_noinv(self):
rd = nx.resistance_distance(self.G, 1, 3, "weight", False)
test_data = 1 / (1 / (1 / 2 + 1 / 4) + 1 / (1 / 1 + 1 / 3))
assert round(rd, 5) == round(test_data, 5)
def test_resistance_distance_no_weight(self):
rd = nx.resistance_distance(self.G, 1, 3)
assert round(rd, 5) == 1
def test_resistance_distance_neg_weight(self):
self.G[2][3]["weight"] = -4
rd = nx.resistance_distance(self.G, 1, 3, "weight", True)
test_data = 1 / (1 / (2 + -4) + 1 / (1 + 3))
assert round(rd, 5) == round(test_data, 5)
def test_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2, weight=2)
G.add_edge(2, 3, weight=4)
G.add_edge(3, 4, weight=1)
G.add_edge(1, 4, weight=3)
rd = nx.resistance_distance(G, 1, 3, "weight", True)
assert np.isclose(rd, 1 / (1 / (2 + 4) + 1 / (1 + 3)))
def test_resistance_distance_div0(self):
with pytest.raises(ZeroDivisionError):
self.G[1][2]["weight"] = 0
nx.resistance_distance(self.G, 1, 3, "weight")
def test_resistance_distance_same_node(self):
assert nx.resistance_distance(self.G, 1, 1) == 0
def test_resistance_distance_only_nodeA(self):
rd = nx.resistance_distance(self.G, nodeA=1)
test_data = {}
test_data[1] = 0
test_data[2] = 0.75
test_data[3] = 1
test_data[4] = 0.75
assert isinstance(rd, dict)
assert sorted(rd.keys()) == sorted(test_data.keys())
for key in rd:
assert np.isclose(rd[key], test_data[key])
def test_resistance_distance_only_nodeB(self):
rd = nx.resistance_distance(self.G, nodeB=1)
test_data = {}
test_data[1] = 0
test_data[2] = 0.75
test_data[3] = 1
test_data[4] = 0.75
assert isinstance(rd, dict)
assert sorted(rd.keys()) == sorted(test_data.keys())
for key in rd:
assert np.isclose(rd[key], test_data[key])
def test_resistance_distance_all(self):
rd = nx.resistance_distance(self.G)
assert isinstance(rd, dict)
assert round(rd[1][3], 5) == 1
| TestResistanceDistance |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0052_alter_versionautomationrule_polymorphic_ctype.py | {
"start": 181,
"end": 828
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("builds", "0051_add_addons_field"),
]
operations = [
migrations.AlterField(
model_name="versionautomationrule",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_%(app_label)s.%(class)s_set+",
to="contenttypes.contenttype",
),
),
]
| Migration |
python | run-llama__llama_index | llama-index-utils/llama-index-utils-qianfan/llama_index/utils/qianfan/apis.py | {
"start": 245,
"end": 716
} | class ____(BaseModel):
"""
Model service item.
"""
name: str
"""model name. example: ERNIE-4.0-8K"""
url: str
"""endpoint url. example: https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"""
api_type: APIType = Field(..., alias="apiType")
"""api type"""
charge_status: Literal["NOTOPEN", "OPENED", "STOP", "FREE"] = Field(
..., alias="chargeStatus"
)
"""Payment status"""
| ServiceItem |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/tests/simple_test_envs.py | {
"start": 10184,
"end": 12475
} | class ____(SimpleEnvironment):
def __init__(self, brain_names, action_sizes=(1, 0), step_size=0.2):
super().__init__(brain_names, action_sizes=action_sizes, step_size=step_size)
# Number of steps to reveal the goal for. Lower is harder. Should be
# less than 1/step_size to force agent to use memory
self.num_show_steps = 2
def _make_batched_step(
self, name: str, done: bool, reward: float, group_reward: float
) -> Tuple[DecisionSteps, TerminalSteps]:
recurrent_obs_val = (
self.goal[name] if self.step_count[name] <= self.num_show_steps else 0
)
m_vector_obs = self._make_obs(recurrent_obs_val)
m_reward = np.array([reward], dtype=np.float32)
m_agent_id = np.array([self.agent_id[name]], dtype=np.int32)
m_group_id = np.array([0], dtype=np.int32)
m_group_reward = np.array([group_reward], dtype=np.float32)
action_mask = self._generate_mask()
decision_step = DecisionSteps(
m_vector_obs, m_reward, m_agent_id, action_mask, m_group_id, m_group_reward
)
terminal_step = TerminalSteps.empty(self.behavior_spec)
if done:
self.final_rewards[name].append(self.rewards[name])
self._reset_agent(name)
recurrent_obs_val = (
self.goal[name] if self.step_count[name] <= self.num_show_steps else 0
)
new_vector_obs = self._make_obs(recurrent_obs_val)
(
new_reward,
new_done,
new_agent_id,
new_action_mask,
new_group_id,
new_group_reward,
) = self._construct_reset_step(name)
decision_step = DecisionSteps(
new_vector_obs,
new_reward,
new_agent_id,
new_action_mask,
new_group_id,
new_group_reward,
)
terminal_step = TerminalSteps(
m_vector_obs,
m_reward,
np.array([False], dtype=np.bool),
m_agent_id,
m_group_id,
m_group_reward,
)
return (decision_step, terminal_step)
| MemoryEnvironment |
python | apache__airflow | providers/apache/hdfs/src/airflow/providers/apache/hdfs/sensors/web_hdfs.py | {
"start": 1797,
"end": 2956
} | class ____(BaseSensorOperator):
"""Waits for multiple files in a folder to land in HDFS."""
template_fields: Sequence[str] = ("directory_path", "expected_filenames")
def __init__(
self,
*,
directory_path: str,
expected_filenames: Sequence[str],
webhdfs_conn_id: str = "webhdfs_default",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.directory_path = directory_path
self.expected_filenames = expected_filenames
self.webhdfs_conn_id = webhdfs_conn_id
def poke(self, context: Context) -> bool:
from airflow.providers.apache.hdfs.hooks.webhdfs import WebHDFSHook
hook = WebHDFSHook(self.webhdfs_conn_id)
conn: KerberosClient | InsecureClient = hook.get_conn()
actual_files = set(conn.list(self.directory_path))
self.log.debug("Files Found in directory: %s", actual_files)
missing_files = set(self.expected_filenames) - actual_files
if missing_files:
self.log.info("There are missing files: %s", missing_files)
return False
return True
| MultipleFilesWebHdfsSensor |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/dataframeeditor.py | {
"start": 57542,
"end": 65284
} | class ____(QAbstractTableModel, SpyderFontsMixin):
"""
This class is the model for the header and index of the DataFrameEditor.
Taken from gtabview project (Header4ExtModel).
For more information please see:
https://github.com/wavexx/gtabview/blob/master/gtabview/viewer.py
"""
COLUMN_INDEX = -1 # Makes reference to the index of the table.
def __init__(self, model, axis, use_monospace_font=False):
"""
Header constructor.
The 'model' is the QAbstractTableModel of the dataframe, the 'axis' is
to acknowledge if is for the header (horizontal - 0) or for the
index (vertical - 1) and the palette is the set of colors to use.
"""
super().__init__()
self.model = model
self.axis = axis
self.use_monospace_font = use_monospace_font
self.total_rows = self.model.shape[0]
self.total_cols = self.model.shape[1]
self.cols_loaded = self.model.cols_loaded
self.rows_loaded = self.model.rows_loaded
if self.axis == 0:
self.total_cols = self.model.shape[1]
self._shape = (self.model.header_shape[0], self.model.shape[1])
else:
self.total_rows = self.model.shape[0]
self._shape = (self.model.shape[0], self.model.header_shape[1])
def rowCount(self, index=None):
"""Get number of rows in the header."""
if self.axis == 0:
return max(1, self._shape[0])
else:
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
def columnCount(self, index=QModelIndex()):
"""DataFrame column number"""
if self.axis == 0:
if self.total_cols <= self.cols_loaded:
return self.total_cols
else:
return self.cols_loaded
else:
return max(1, self._shape[1])
def fetch_more(self, rows=False, columns=False):
"""Get more columns or rows (based on axis)."""
if self.axis == 1 and self.total_rows > self.rows_loaded:
reminder = self.total_rows - self.rows_loaded
items_to_fetch = min(reminder, ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if self.axis == 0 and self.total_cols > self.cols_loaded:
reminder = self.total_cols - self.cols_loaded
items_to_fetch = min(reminder, COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns()
def sort(self, column, order=Qt.AscendingOrder):
"""Overriding sort method."""
ascending = order == Qt.AscendingOrder
self.model.sort(self.COLUMN_INDEX, order=ascending)
return True
def headerData(self, section, orientation, role):
"""Get the information to put in the header."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter
else:
return int(Qt.AlignRight | Qt.AlignVCenter)
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.axis == 1 and self._shape[1] <= 1:
return None
orient_axis = 0 if orientation == Qt.Horizontal else 1
if self.model.header_shape[orient_axis] > 1:
header = section
else:
header = self.model.header(self.axis, section)
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
if not type(header) in [str, bytes]:
header = str(header)
return header
def data(self, index, role):
"""
Get the data for the header.
This is used when a header has levels.
"""
if (
not index.isValid()
or index.row() >= self._shape[0]
or index.column() >= self._shape[1]
):
return None
row, col = (
(index.row(), index.column()) if self.axis == 0
else (index.column(), index.row())
)
if self.use_monospace_font and role == Qt.FontRole:
return self.get_font(SpyderFontType.MonospaceInterface)
if role != Qt.DisplayRole:
return None
if self.axis == 0 and self._shape[0] <= 1:
return None
header = self.model.header(self.axis, col, row)
# Don't perform any conversion on strings
# because it leads to differences between
# the data present in the dataframe and
# what is shown by Spyder
if not type(header) in [str, bytes]:
header = str(header)
return header
def flags(self, index):
"""Set flags"""
return (QAbstractTableModel.flags(self, index) |
Qt.ItemFlag.ItemIsEditable |
Qt.ItemFlag.ItemIsEnabled |
Qt.ItemFlag.ItemIsSelectable
)
def setData(self, index, value, role):
"""Cell content change"""
df = self.model.df
if role == Qt.EditRole:
if self.axis == 1:
old_value = df.index[index.row()]
if value not in df.index.tolist():
if type(old_value) is tuple:
old_value_list = list(old_value)
rows = df.index
names = rows.names
old_value_list[index.column()] = value
rows = (
df.index.tolist()[0:index.row()]
+ [tuple(old_value_list)]
+ df.index.tolist()[index.row()+1:]
)
df.index = pd.MultiIndex.from_tuples(rows, names=names)
else:
try:
df.rename(index={old_value: value}, inplace=True,
errors='raise')
except TypeError as e:
QMessageBox.warning(
self.model().dialog,
_("Warning: It was not possible to remove "
"this index!"),
_("ValueError: {} must be removed from "
"index.").format(str(e))
)
return False
else:
QMessageBox.warning(
self.model().dialog,
_("Warning: Duplicate index!"),
_('Row with name "{}" already exists!').format(value)
)
return False
self.model.dialog._reload()
self.model.dataChanged.emit(index, index)
return True
if self.axis == 0:
old_value = df.columns[index.column()]
try:
df.rename(columns={old_value: value}, inplace=True,
errors='raise')
except Exception:
return False
return True
return True
return False
| DataFrameHeaderModel |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 43975,
"end": 48301
} | class ____(DPTPreTrainedModel):
def __init__(self, config: DPTConfig):
super().__init__(config)
self.dpt = DPTModel(config, add_pooling_layer=False)
# Neck
self.neck = DPTNeck(config)
# Segmentation head(s)
self.head = DPTSemanticSegmentationHead(config)
self.auxiliary_head = DPTAuxiliaryHead(config) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> SemanticSegmenterOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, DPTForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade")
>>> model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
if labels is not None and self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
outputs: BaseModelOutputWithPoolingAndIntermediateActivations = self.dpt(
pixel_values, output_hidden_states=True, **kwargs
)
hidden_states = outputs.hidden_states
# only keep certain features based on config.backbone_out_indices
# note that the hidden_states also include the initial embeddings
if not self.config.is_hybrid:
hidden_states = [
feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices
]
else:
backbone_hidden_states = outputs.intermediate_activations
backbone_hidden_states.extend(
feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:]
)
hidden_states = backbone_hidden_states
hidden_states = self.neck(hidden_states=hidden_states)
logits = self.head(hidden_states)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(hidden_states[-1])
loss = None
if labels is not None:
# upsample logits to the images' original size
upsampled_logits = nn.functional.interpolate(
logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
)
if auxiliary_logits is not None:
upsampled_auxiliary_logits = nn.functional.interpolate(
auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
)
# compute weighted loss
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
main_loss = loss_fct(upsampled_logits, labels)
auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
return SemanticSegmenterOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=outputs.attentions,
)
__all__ = ["DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel"]
| DPTForSemanticSegmentation |
python | getsentry__sentry | src/sentry/notifications/api/endpoints/notification_defaults.py | {
"start": 394,
"end": 1104
} | class ____(Endpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ALERTS_NOTIFICATIONS
permission_classes = ()
def get(self, request: Request) -> Response:
"""
Return the default config for notification settings.
This becomes the fallback in the UI.
"""
return Response(
{
"providerDefaults": [provider.value for provider in DEFAULT_ENABLED_PROVIDERS],
"typeDefaults": {
type.value: default.value
for type, default in NOTIFICATION_SETTINGS_TYPE_DEFAULTS.items()
},
}
)
| NotificationDefaultsEndpoints |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_quotes/docstring_doubles_mixed_quotes_class_var_2.py | {
"start": 0,
"end": 357
} | class ____():
"Do not"' start with empty string' ' and lint docstring safely'
""" Not a docstring """
def foo(self, bar="""not a docstring"""):
"Do not"' start with empty string' ' and lint docstring safely'
pass
class Nested(foo()[:]): "Do not"' start with empty string' ' and lint docstring safely'; pass
| SingleLineDocstrings |
python | facebook__pyre-check | tools/upgrade/ast.py | {
"start": 508,
"end": 1670
} | class ____(Exception):
pass
def check_stable(input: str, transformed: str) -> None:
parsed_original = ast.parse(input)
try:
parsed_transformed = ast.parse(transformed)
if ast.dump(parsed_original) != ast.dump(parsed_transformed):
raise UnstableAST("ASTs differ")
except SyntaxError:
raise UnstableAST("Could not parse transformed AST")
def check_stable_transformation(
# pyre-fixme[31]: Expression `Concatenate[(str,
# $local_tools?pyre?tools?upgrade?ast$Ts)], str)]` is not a valid type.
# pyre-fixme[31]: Expression `Concatenate[(str,
# $local_tools?pyre?tools?upgrade?ast$Ts)], str)]` is not a valid type.
transform: "Callable[Concatenate[str, Ts], str]",
# pyre-fixme[31]: Expression `Concatenate[(str,
# $local_tools?pyre?tools?upgrade?ast$Ts)], str)]` is not a valid type.
) -> "Callable[Concatenate[str, Ts], str]":
# pyre-fixme[11]: Annotation `Ts` is not defined as a type.
def wrapper(input: str, *args: Ts) -> str:
transformed = transform(input, *args)
check_stable(input, transformed)
return transformed
return wrapper
| UnstableAST |
python | mlflow__mlflow | mlflow/store/model_registry/sqlalchemy_store.py | {
"start": 2744,
"end": 62662
} | class ____(AbstractStore):
"""
This entity may change or be removed in a future release without warning.
SQLAlchemy compliant backend store for tracking meta data for MLflow entities. MLflow
supports the database dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``.
As specified in the
`SQLAlchemy docs <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_ ,
the database URI is expected in the format
``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``. If you do not
specify a driver, SQLAlchemy uses a dialect's default driver.
This store interacts with SQL store using SQLAlchemy abstractions defined for MLflow entities.
:py:class:`mlflow.store.model_registry.models.RegisteredModel` and
:py:class:`mlflow.store.model_registry.models.ModelVersion`
"""
CREATE_MODEL_VERSION_RETRIES = 3
def __init__(self, db_uri):
"""
Create a database backed store.
Args:
db_uri: The SQLAlchemy database URI string to connect to the database. See
the `SQLAlchemy docs
<https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_
for format specifications. MLflow supports the dialects ``mysql``,
``mssql``, ``sqlite``, and ``postgresql``.
default_artifact_root: Path/URI to location suitable for large data (such as a blob
store object, DBFS path, or shared NFS file system).
"""
super().__init__()
self.db_uri = db_uri
self.db_type = extract_db_type_from_uri(db_uri)
self.engine = mlflow.store.db.utils.create_sqlalchemy_engine_with_retry(db_uri)
if not mlflow.store.db.utils._all_tables_exist(self.engine):
mlflow.store.db.utils._initialize_tables(self.engine)
# Verify that all model registry tables exist.
SqlAlchemyStore._verify_registry_tables_exist(self.engine)
SessionMaker = sqlalchemy.orm.sessionmaker(bind=self.engine)
self.ManagedSessionMaker = mlflow.store.db.utils._get_managed_session_maker(
SessionMaker, self.db_type
)
# TODO: verify schema here once we add logic to initialize the registry tables if they
# don't exist (schema verification will fail in tests otherwise)
# mlflow.store.db.utils._verify_schema(self.engine)
def _get_dialect(self):
return self.engine.dialect.name
def _dispose_engine(self):
self.engine.dispose()
@staticmethod
def _verify_registry_tables_exist(engine):
# Verify that all tables have been created.
inspected_tables = set(sqlalchemy.inspect(engine).get_table_names())
expected_tables = [
SqlRegisteredModel.__tablename__,
SqlModelVersion.__tablename__,
SqlWebhook.__tablename__,
SqlWebhookEvent.__tablename__,
]
if any(table not in inspected_tables for table in expected_tables):
# TODO: Replace the MlflowException with the following line once it's possible to run
# the registry against a different DB than the tracking server:
# mlflow.store.db.utils._initialize_tables(self.engine)
raise MlflowException("Database migration in unexpected state. Run manual upgrade.")
@staticmethod
def _get_eager_registered_model_query_options():
"""
A list of SQLAlchemy query options that can be used to eagerly
load the following registered model attributes
when fetching a registered model: ``registered_model_tags``.
"""
# Use a subquery load rather than a joined load in order to minimize the memory overhead
# of the eager loading procedure. For more information about relationship loading
# techniques, see https://docs.sqlalchemy.org/en/13/orm/
# loading_relationships.html#relationship-loading-techniques
return [sqlalchemy.orm.subqueryload(SqlRegisteredModel.registered_model_tags)]
@staticmethod
def _get_eager_model_version_query_options():
"""
A list of SQLAlchemy query options that can be used to eagerly
load the following model version attributes
when fetching a model version: ``model_version_tags``.
"""
# Use a subquery load rather than a joined load in order to minimize the memory overhead
# of the eager loading procedure. For more information about relationship loading
# techniques, see https://docs.sqlalchemy.org/en/13/orm/
# loading_relationships.html#relationship-loading-techniques
return [sqlalchemy.orm.subqueryload(SqlModelVersion.model_version_tags)]
def create_registered_model(self, name, tags=None, description=None, deployment_job_id=None):
"""
Create a new registered model in backend store.
Args:
name: Name of the new model. This is expected to be unique in the backend store.
tags: A list of :py:class:`mlflow.entities.model_registry.RegisteredModelTag`
instances associated with this registered model.
description: Description of the version.
deployment_job_id: Optional deployment job ID.
Returns:
A single object of :py:class:`mlflow.entities.model_registry.RegisteredModel`
created in the backend.
"""
_validate_model_name(name)
for tag in tags or []:
_validate_registered_model_tag(tag.key, tag.value)
with self.ManagedSessionMaker() as session:
try:
creation_time = get_current_time_millis()
registered_model = SqlRegisteredModel(
name=name,
creation_time=creation_time,
last_updated_time=creation_time,
description=description,
)
tags_dict = {}
for tag in tags or []:
tags_dict[tag.key] = tag.value
registered_model.registered_model_tags = [
SqlRegisteredModelTag(key=key, value=value) for key, value in tags_dict.items()
]
session.add(registered_model)
session.flush()
return registered_model.to_mlflow_entity()
except sqlalchemy.exc.IntegrityError:
existing_model = self.get_registered_model(name)
handle_resource_already_exist_error(
name, has_prompt_tag(existing_model._tags), has_prompt_tag(tags)
)
@classmethod
def _get_registered_model(cls, session, name, eager=False):
"""
Args:
eager: If ``True``, eagerly loads the registered model's tags. If ``False``, these
attributes are not eagerly loaded and will be loaded when their corresponding object
properties are accessed from the resulting ``SqlRegisteredModel`` object.
"""
_validate_model_name(name)
query_options = cls._get_eager_registered_model_query_options() if eager else []
rms = (
session.query(SqlRegisteredModel)
.options(*query_options)
.filter(SqlRegisteredModel.name == name)
.all()
)
if len(rms) == 0:
raise MlflowException(
f"Registered Model with name={name} not found", RESOURCE_DOES_NOT_EXIST
)
if len(rms) > 1:
raise MlflowException(
f"Expected only 1 registered model with name={name}. Found {len(rms)}.",
INVALID_STATE,
)
return rms[0]
def update_registered_model(self, name, description, deployment_job_id=None):
"""
Update description of the registered model.
Args:
name: Registered model name.
description: New description.
deployment_job_id: Optional deployment job ID.
Returns:
A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
with self.ManagedSessionMaker() as session:
sql_registered_model = self._get_registered_model(session, name)
updated_time = get_current_time_millis()
sql_registered_model.description = description
sql_registered_model.last_updated_time = updated_time
session.add(sql_registered_model)
session.flush()
return sql_registered_model.to_mlflow_entity()
def rename_registered_model(self, name, new_name):
"""
Rename the registered model.
Args:
name: Registered model name.
new_name: New proposed name.
Returns:
A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
_validate_model_renaming(new_name)
with self.ManagedSessionMaker() as session:
sql_registered_model = self._get_registered_model(session, name)
try:
updated_time = get_current_time_millis()
sql_registered_model.name = new_name
for sql_model_version in sql_registered_model.model_versions:
sql_model_version.name = new_name
sql_model_version.last_updated_time = updated_time
sql_registered_model.last_updated_time = updated_time
session.add_all([sql_registered_model] + sql_registered_model.model_versions)
session.flush()
return sql_registered_model.to_mlflow_entity()
except sqlalchemy.exc.IntegrityError as e:
raise MlflowException(
f"Registered Model (name={new_name}) already exists. Error: {e}",
RESOURCE_ALREADY_EXISTS,
)
def delete_registered_model(self, name):
"""
Delete the registered model.
Backend raises exception if a registered model with given name does not exist.
Args:
name: Registered model name.
Returns:
None
"""
with self.ManagedSessionMaker() as session:
sql_registered_model = self._get_registered_model(session, name)
session.delete(sql_registered_model)
def _compute_next_token(self, max_results_for_query, current_size, offset, max_results):
next_token = None
if max_results_for_query == current_size:
final_offset = offset + max_results
next_token = SearchUtils.create_page_token(final_offset)
return next_token
    def search_registered_models(
        self,
        filter_string=None,
        max_results=SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,
        order_by=None,
        page_token=None,
    ):
        """
        Search for registered models in backend that satisfy the filter criteria.

        Args:
            filter_string: Filter query string, defaults to searching all registered models.
            max_results: Maximum number of registered models desired.
            order_by: List of column names with ASC|DESC annotation, to be used for ordering
                matching search results.
            page_token: Token specifying the next page of results. It should be obtained from
                a ``search_registered_models`` call.

        Returns:
            A PagedList of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects
            that satisfy the search expressions. The pagination token for the next page can be
            obtained via the ``token`` attribute of the object.

        Raises:
            MlflowException: If ``max_results`` exceeds the allowed threshold.
        """
        if max_results > SEARCH_REGISTERED_MODEL_MAX_RESULTS_THRESHOLD:
            raise MlflowException(
                "Invalid value for request parameter max_results. It must be at most "
                f"{SEARCH_REGISTERED_MODEL_MAX_RESULTS_THRESHOLD}, but got value {max_results}",
                INVALID_PARAMETER_VALUE,
            )
        parsed_filters = SearchModelUtils.parse_search_filter(filter_string)
        filter_query = self._get_search_registered_model_filter_query(
            parsed_filters, self.engine.dialect.name
        )
        parsed_orderby = self._parse_search_registered_models_order_by(order_by)
        # NOTE(review): presumably 0 when page_token is None — confirm in SearchUtils.
        offset = SearchUtils.parse_start_offset_from_page_token(page_token)
        # we query for max_results + 1 items to check whether there is another page to return.
        # this remediates having to make another query which returns no items.
        max_results_for_query = max_results + 1
        with self.ManagedSessionMaker() as session:
            query = (
                filter_query.options(*self._get_eager_registered_model_query_options())
                .order_by(*parsed_orderby)
                .limit(max_results_for_query)
            )
            if page_token:
                query = query.offset(offset)
            sql_registered_models = session.execute(query).scalars(SqlRegisteredModel).all()
            # A full page (max_results + 1 rows) means more results exist beyond this page.
            next_page_token = self._compute_next_token(
                max_results_for_query, len(sql_registered_models), offset, max_results
            )
            # Trim the sentinel extra row before converting to entities.
            rm_entities = [rm.to_mlflow_entity() for rm in sql_registered_models][:max_results]
            return PagedList(rm_entities, next_page_token)
    @classmethod
    def _get_search_registered_model_filter_query(cls, parsed_filters, dialect):
        """
        Build a SQLAlchemy ``select`` over ``SqlRegisteredModel`` from parsed filters.

        Args:
            parsed_filters: Parsed filter dicts with ``type``, ``key``, ``comparator``
                and ``value`` entries.
            dialect: SQL dialect name, used to pick dialect-appropriate comparison
                functions via ``SearchUtils.get_sql_comparison_func``.

        Returns:
            A ``select`` with attribute filters applied directly and tag filters
            applied through a grouped subquery join; prompt rows are excluded unless
            the filters explicitly query prompts.

        Raises:
            MlflowException: If an attribute name, comparator, or token type is invalid.
        """
        attribute_filters = []
        # tag key -> [filter on tag key, filters on tag value...]; see the HAVING
        # clause below for how matches across multiple keys are combined.
        tag_filters = {}
        for f in parsed_filters:
            type_ = f["type"]
            key = f["key"]
            comparator = f["comparator"]
            value = f["value"]
            if type_ == "attribute":
                # "name" is the only searchable registered-model attribute.
                if key != "name":
                    raise MlflowException(
                        f"Invalid attribute name: {key}", error_code=INVALID_PARAMETER_VALUE
                    )
                if comparator not in ("=", "!=", "LIKE", "ILIKE"):
                    raise MlflowException(
                        f"Invalid comparator for attribute: {comparator}",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                attr = getattr(SqlRegisteredModel, key)
                attr_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(attr, value)
                attribute_filters.append(attr_filter)
            elif type_ == "tag":
                if comparator not in ("=", "!=", "LIKE", "ILIKE"):
                    raise MlflowException.invalid_parameter_value(
                        f"Invalid comparator for tag: {comparator}"
                    )
                if key not in tag_filters:
                    key_filter = SearchUtils.get_sql_comparison_func("=", dialect)(
                        SqlRegisteredModelTag.key, key
                    )
                    tag_filters[key] = [key_filter]
                val_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                    SqlRegisteredModelTag.value, value
                )
                tag_filters[key].append(val_filter)
            else:
                raise MlflowException(
                    f"Invalid token type: {type_}", error_code=INVALID_PARAMETER_VALUE
                )
        rm_query = select(SqlRegisteredModel).filter(*attribute_filters)
        # Unless the caller explicitly searches for prompts, exclude prompt rows.
        if not cls._is_querying_prompt(parsed_filters):
            rm_query = cls._update_query_to_exclude_prompts(
                rm_query, tag_filters, dialect, SqlRegisteredModel, SqlRegisteredModelTag
            )
        if tag_filters:
            sql_tag_filters = (sqlalchemy.and_(*x) for x in tag_filters.values())
            # A model matches only if it has a qualifying tag row for every filtered
            # key: the OR collects candidate rows, and the HAVING count requires one
            # match per distinct filtered key.
            tag_filter_query = (
                select(SqlRegisteredModelTag.name)
                .filter(sqlalchemy.or_(*sql_tag_filters))
                .group_by(SqlRegisteredModelTag.name)
                .having(sqlalchemy.func.count(sqlalchemy.literal(1)) == len(tag_filters))
                .subquery()
            )
            return rm_query.join(
                tag_filter_query, SqlRegisteredModel.name == tag_filter_query.c.name
            )
        else:
            return rm_query
    @classmethod
    def _get_search_model_versions_filter_clauses(cls, parsed_filters, dialect):
        """
        Build a SQLAlchemy ``select`` over ``SqlModelVersion`` from parsed filters.

        Args:
            parsed_filters: Parsed filter dicts with ``type``, ``key``, ``comparator``
                and ``value`` entries.
            dialect: SQL dialect name, used to pick dialect-appropriate comparison
                functions via ``SearchUtils.get_sql_comparison_func``.

        Returns:
            A ``select`` with attribute filters applied directly and tag filters
            applied through a grouped subquery join keyed on (name, version); prompt
            rows are excluded unless the filters explicitly query prompts.

        Raises:
            MlflowException: If an attribute name, comparator, or token type is invalid.
        """
        attribute_filters = []
        # tag key -> [filter on tag key, filters on tag value...]; see the HAVING
        # clause below for how matches across multiple keys are combined.
        tag_filters = {}
        for f in parsed_filters:
            type_ = f["type"]
            key = f["key"]
            comparator = f["comparator"]
            value = f["value"]
            if type_ == "attribute":
                if key not in SearchModelVersionUtils.VALID_SEARCH_ATTRIBUTE_KEYS:
                    raise MlflowException(
                        f"Invalid attribute name: {key}", error_code=INVALID_PARAMETER_VALUE
                    )
                # Numeric attributes accept numeric comparators only; string
                # attributes accept string comparators, with IN restricted to run_id.
                if key in SearchModelVersionUtils.NUMERIC_ATTRIBUTES:
                    if (
                        comparator
                        not in SearchModelVersionUtils.VALID_NUMERIC_ATTRIBUTE_COMPARATORS
                    ):
                        raise MlflowException(
                            f"Invalid comparator for attribute {key}: {comparator}",
                            error_code=INVALID_PARAMETER_VALUE,
                        )
                elif (
                    comparator not in SearchModelVersionUtils.VALID_STRING_ATTRIBUTE_COMPARATORS
                    or (comparator == "IN" and key != "run_id")
                ):
                    raise MlflowException(
                        f"Invalid comparator for attribute: {comparator}",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                # Map user-facing attribute aliases onto actual column names.
                if key == "source_path":
                    key_name = "source"
                elif key == "version_number":
                    key_name = "version"
                else:
                    key_name = key
                attr = getattr(SqlModelVersion, key_name)
                if comparator == "IN":
                    # Note: Here the run_id values in databases contain only lower case letters,
                    # so we already filter out comparison values containing upper case letters
                    # in `SearchModelUtils._get_value`. This addresses MySQL IN clause case
                    # in-sensitive issue.
                    val_filter = attr.in_(value)
                else:
                    val_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                        attr, value
                    )
                attribute_filters.append(val_filter)
            elif type_ == "tag":
                if comparator not in ("=", "!=", "LIKE", "ILIKE"):
                    raise MlflowException.invalid_parameter_value(
                        f"Invalid comparator for tag: {comparator}",
                    )
                if key not in tag_filters:
                    key_filter = SearchUtils.get_sql_comparison_func("=", dialect)(
                        SqlModelVersionTag.key, key
                    )
                    tag_filters[key] = [key_filter]
                val_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                    SqlModelVersionTag.value, value
                )
                tag_filters[key].append(val_filter)
            else:
                raise MlflowException(
                    f"Invalid token type: {type_}", error_code=INVALID_PARAMETER_VALUE
                )
        mv_query = select(SqlModelVersion).filter(*attribute_filters)
        # Unless the caller explicitly searches for prompts, exclude prompt rows.
        if not cls._is_querying_prompt(parsed_filters):
            mv_query = cls._update_query_to_exclude_prompts(
                mv_query, tag_filters, dialect, SqlModelVersion, SqlModelVersionTag
            )
        if tag_filters:
            sql_tag_filters = (sqlalchemy.and_(*x) for x in tag_filters.values())
            # A version matches only if it has a qualifying tag row for every filtered
            # key: the OR collects candidate rows, and the HAVING count requires one
            # match per distinct filtered key.
            tag_filter_query = (
                select(SqlModelVersionTag.name, SqlModelVersionTag.version)
                .filter(sqlalchemy.or_(*sql_tag_filters))
                .group_by(SqlModelVersionTag.name, SqlModelVersionTag.version)
                .having(sqlalchemy.func.count(sqlalchemy.literal(1)) == len(tag_filters))
                .subquery()
            )
            return mv_query.join(
                tag_filter_query,
                sqlalchemy.and_(
                    SqlModelVersion.name == tag_filter_query.c.name,
                    SqlModelVersion.version == tag_filter_query.c.version,
                ),
            )
        else:
            return mv_query
@classmethod
def _update_query_to_exclude_prompts(
cls,
query: Any,
tag_filters: dict[str, list[Any]],
dialect: str,
main_db_model: SqlModelVersion | SqlRegisteredModel,
tag_db_model: SqlModelVersionTag | SqlRegisteredModelTag,
):
"""
Update query to exclude all prompt rows and return only normal model or model versions.
Prompts and normal models are distinguished by the `mlflow.prompt.is_prompt` tag.
The search API should only return normal models by default. However, simply filtering
rows using the tag like this does not work because models do not have the prompt tag.
tags.`mlflow.prompt.is_prompt` != 'true'
tags.`mlflow.prompt.is_prompt` = 'false'
To workaround this, we need to use a subquery to get all prompt rows and then use an
anti-join for excluding prompts.
"""
# If the tag filter contains the prompt tag, remove it
tag_filters.pop(IS_PROMPT_TAG_KEY, [])
# Filter to get all prompt rows
equal = SearchUtils.get_sql_comparison_func("=", dialect)
prompts_subquery = (
select(tag_db_model.name)
.filter(
equal(tag_db_model.key, IS_PROMPT_TAG_KEY),
equal(tag_db_model.value, "true"),
)
.group_by(tag_db_model.name)
.subquery()
)
return query.join(
prompts_subquery, main_db_model.name == prompts_subquery.c.name, isouter=True
).filter(prompts_subquery.c.name.is_(None))
@classmethod
def _is_querying_prompt(cls, parsed_filters: list[dict[str, Any]]) -> bool:
for f in parsed_filters:
if f["type"] != "tag" or f["key"] != IS_PROMPT_TAG_KEY:
continue
return (f["comparator"] == "=" and f["value"].lower() == "true") or (
f["comparator"] == "!=" and f["value"].lower() == "false"
)
# Query should return only normal models by default
return False
@classmethod
def _parse_search_registered_models_order_by(cls, order_by_list):
"""Sorts a set of registered models based on their natural ordering and an overriding set
of order_bys. Registered models are naturally ordered first by name ascending.
"""
clauses = []
observed_order_by_clauses = set()
if order_by_list:
for order_by_clause in order_by_list:
(
attribute_token,
ascending,
) = SearchUtils.parse_order_by_for_search_registered_models(order_by_clause)
if attribute_token == SqlRegisteredModel.name.key:
field = SqlRegisteredModel.name
elif attribute_token in SearchUtils.VALID_TIMESTAMP_ORDER_BY_KEYS:
field = SqlRegisteredModel.last_updated_time
else:
raise MlflowException(
f"Invalid order by key '{attribute_token}' specified."
+ "Valid keys are "
+ f"'{SearchUtils.RECOMMENDED_ORDER_BY_KEYS_REGISTERED_MODELS}'",
error_code=INVALID_PARAMETER_VALUE,
)
if field.key in observed_order_by_clauses:
raise MlflowException(f"`order_by` contains duplicate fields: {order_by_list}")
observed_order_by_clauses.add(field.key)
if ascending:
clauses.append(field.asc())
else:
clauses.append(field.desc())
if SqlRegisteredModel.name.key not in observed_order_by_clauses:
clauses.append(SqlRegisteredModel.name.asc())
return clauses
def get_registered_model(self, name):
"""
Get registered model instance by name.
Args:
name: Registered model name.
Returns:
A single :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
"""
with self.ManagedSessionMaker() as session:
return self._get_registered_model(session, name, eager=True).to_mlflow_entity()
def get_latest_versions(self, name, stages=None):
"""
Latest version models for each requested stage. If no ``stages`` argument is provided,
returns the latest version for each stage.
Args:
name: Registered model name.
stages: List of desired stages. If input list is None, return latest versions for
each stage.
Returns:
List of :py:class:`mlflow.entities.model_registry.ModelVersion` objects.
"""
with self.ManagedSessionMaker() as session:
sql_registered_model = self._get_registered_model(session, name)
# Convert to RegisteredModel entity first and then extract latest_versions
latest_versions = sql_registered_model.to_mlflow_entity().latest_versions
if stages is None or len(stages) == 0:
expected_stages = {get_canonical_stage(stage) for stage in ALL_STAGES}
else:
expected_stages = {get_canonical_stage(stage) for stage in stages}
mvs = [mv for mv in latest_versions if mv.current_stage in expected_stages]
# Populate aliases for each model version
for mv in mvs:
model_aliases = sql_registered_model.registered_model_aliases
mv.aliases = [alias.alias for alias in model_aliases if alias.version == mv.version]
return mvs
@classmethod
def _get_registered_model_tag(cls, session, name, key):
tags = (
session.query(SqlRegisteredModelTag)
.filter(SqlRegisteredModelTag.name == name, SqlRegisteredModelTag.key == key)
.all()
)
if len(tags) == 0:
return None
if len(tags) > 1:
raise MlflowException(
f"Expected only 1 registered model tag with name={name}, key={key}. "
f"Found {len(tags)}.",
INVALID_STATE,
)
return tags[0]
def set_registered_model_tag(self, name, tag):
"""
Set a tag for the registered model.
Args:
name: Registered model name.
tag: :py:class:`mlflow.entities.model_registry.RegisteredModelTag` instance to log.
Returns:
None
"""
_validate_model_name(name)
_validate_registered_model_tag(tag.key, tag.value)
with self.ManagedSessionMaker() as session:
# check if registered model exists
self._get_registered_model(session, name)
session.merge(SqlRegisteredModelTag(name=name, key=tag.key, value=tag.value))
def delete_registered_model_tag(self, name, key):
"""
Delete a tag associated with the registered model.
Args:
name: Registered model name.
key: Registered model tag key.
Returns:
None
"""
_validate_model_name(name)
_validate_tag_name(key)
with self.ManagedSessionMaker() as session:
# check if registered model exists
self._get_registered_model(session, name)
existing_tag = self._get_registered_model_tag(session, name, key)
if existing_tag is not None:
session.delete(existing_tag)
# CRUD API for ModelVersion objects
    def create_model_version(
        self,
        name,
        source,
        run_id=None,
        tags=None,
        run_link=None,
        description=None,
        local_model_path=None,
        model_id: str | None = None,
    ):
        """
        Create a new model version from given source and run ID.

        Args:
            name: Registered model name.
            source: URI indicating the location of the model artifacts.
            run_id: Run ID from MLflow tracking server that generated the model.
            tags: A list of :py:class:`mlflow.entities.model_registry.ModelVersionTag`
                instances associated with this model version.
            run_link: Link to the run from an MLflow tracking server that generated this model.
            description: Description of the version.
            local_model_path: Unused.
            model_id: The ID of the model (from an Experiment) that is being promoted to a
                registered model version, if applicable.

        Returns:
            A single object of :py:class:`mlflow.entities.model_registry.ModelVersion`
            created in the backend.

        Raises:
            MlflowException: If a ``models:`` source cannot be resolved, or if creation
                still fails after ``CREATE_MODEL_VERSION_RETRIES`` attempts.
        """
        def next_version(sql_registered_model):
            # Version numbers start at 1 and increase past the current maximum.
            if sql_registered_model.model_versions:
                return max(mv.version for mv in sql_registered_model.model_versions) + 1
            else:
                return 1
        _validate_model_name(name)
        for tag in tags or []:
            _validate_model_version_tag(tag.key, tag.value)
        storage_location = source
        # Resolve "models:" URIs to the underlying artifact location.
        if urllib.parse.urlparse(source).scheme == "models":
            parsed_model_uri = _parse_model_uri(source)
            try:
                if parsed_model_uri.model_id is not None:
                    # TODO: Propagate tracking URI to file sqlalchemy directly, rather than relying
                    # on global URI (individual MlflowClient instances may have different tracking
                    # URIs)
                    model = MlflowClient().get_logged_model(parsed_model_uri.model_id)
                    storage_location = model.artifact_location
                    run_id = run_id or model.source_run_id
                else:
                    # models:/<name>/<version> — reuse the referenced version's location.
                    storage_location = self.get_model_version_download_uri(
                        parsed_model_uri.name, parsed_model_uri.version
                    )
            except Exception as e:
                raise MlflowException(
                    f"Unable to fetch model from model URI source artifact location '{source}'."
                    f"Error: {e}"
                ) from e
        # Fall back to the logged model's source run when no run_id was supplied.
        if not run_id and model_id:
            model = MlflowClient().get_logged_model(model_id)
            run_id = model.source_run_id
        with self.ManagedSessionMaker() as session:
            creation_time = get_current_time_millis()
            # Retry on IntegrityError — presumably a concurrent writer claimed the same
            # version number; re-reading next_version resolves it. TODO confirm.
            for attempt in range(self.CREATE_MODEL_VERSION_RETRIES):
                try:
                    sql_registered_model = self._get_registered_model(session, name)
                    sql_registered_model.last_updated_time = creation_time
                    version = next_version(sql_registered_model)
                    model_version = SqlModelVersion(
                        name=name,
                        version=version,
                        creation_time=creation_time,
                        last_updated_time=creation_time,
                        source=source,
                        storage_location=storage_location,
                        run_id=run_id,
                        run_link=run_link,
                        description=description,
                    )
                    # Deduplicate tags by key; the last occurrence of a key wins.
                    tags_dict = {}
                    for tag in tags or []:
                        tags_dict[tag.key] = tag.value
                    model_version.model_version_tags = [
                        SqlModelVersionTag(key=key, value=value) for key, value in tags_dict.items()
                    ]
                    session.add_all([sql_registered_model, model_version])
                    session.flush()
                    return self._populate_model_version_aliases(
                        session, name, model_version.to_mlflow_entity()
                    )
                except sqlalchemy.exc.IntegrityError:
                    more_retries = self.CREATE_MODEL_VERSION_RETRIES - attempt - 1
                    _logger.info(
                        "Model Version creation error (name=%s) Retrying %s more time%s.",
                        name,
                        str(more_retries),
                        "s" if more_retries > 1 else "",
                    )
            # All retries exhausted without a successful insert.
            raise MlflowException(
                f"Model Version creation error (name={name}). Giving up after "
                f"{self.CREATE_MODEL_VERSION_RETRIES} attempts."
            )
@classmethod
def _populate_model_version_aliases(cls, session, name, version):
model_aliases = cls._get_registered_model(session, name).registered_model_aliases
version.aliases = [
alias.alias for alias in model_aliases if alias.version == version.version
]
return version
@classmethod
def _get_model_version_from_db(cls, session, name, version, conditions, query_options=None):
if query_options is None:
query_options = []
versions = session.query(SqlModelVersion).options(*query_options).filter(*conditions).all()
if len(versions) == 0:
raise MlflowException(
f"Model Version (name={name}, version={version}) not found",
RESOURCE_DOES_NOT_EXIST,
)
if len(versions) > 1:
raise MlflowException(
f"Expected only 1 model version with (name={name}, version={version}). "
f"Found {len(versions)}.",
INVALID_STATE,
)
return versions[0]
@classmethod
def _get_sql_model_version(cls, session, name, version, eager=False):
"""
Args:
eager: If ``True``, eagerly loads the model version's tags.
If ``False``, these attributes are not eagerly loaded and
will be loaded when their corresponding object properties
are accessed from the resulting ``SqlModelVersion`` object.
"""
_validate_model_name(name)
_validate_model_version(version)
query_options = cls._get_eager_model_version_query_options() if eager else []
conditions = [
SqlModelVersion.name == name,
SqlModelVersion.version == version,
SqlModelVersion.current_stage != STAGE_DELETED_INTERNAL,
]
return cls._get_model_version_from_db(session, name, version, conditions, query_options)
def _get_sql_model_version_including_deleted(self, name, version):
"""
Private method to retrieve model versions including those that are internally deleted.
Used in tests to verify redaction behavior on deletion.
Args:
name: Registered model name.
version: Registered model version.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
with self.ManagedSessionMaker() as session:
conditions = [
SqlModelVersion.name == name,
SqlModelVersion.version == version,
]
sql_model_version = self._get_model_version_from_db(session, name, version, conditions)
return self._populate_model_version_aliases(
session, name, sql_model_version.to_mlflow_entity()
)
def update_model_version(self, name, version, description=None):
"""
Update metadata associated with a model version in backend.
Args:
name: Registered model name.
version: Registered model version.
description: New model description.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
with self.ManagedSessionMaker() as session:
updated_time = get_current_time_millis()
sql_model_version = self._get_sql_model_version(session, name=name, version=version)
sql_model_version.description = description
sql_model_version.last_updated_time = updated_time
session.add(sql_model_version)
return self._populate_model_version_aliases(
session, name, sql_model_version.to_mlflow_entity()
)
def transition_model_version_stage(self, name, version, stage, archive_existing_versions):
"""
Update model version stage.
Args:
name: Registered model name.
version: Registered model version.
stage: New desired stage for this model version.
archive_existing_versions: If this flag is set to ``True``, all existing model
versions in the stage will be automatically moved to the "archived" stage. Only
valid when ``stage`` is ``"staging"`` or ``"production"`` otherwise an error will
be raised.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
is_active_stage = get_canonical_stage(stage) in DEFAULT_STAGES_FOR_GET_LATEST_VERSIONS
if archive_existing_versions and not is_active_stage:
msg_tpl = (
"Model version transition cannot archive existing model versions "
"because '{}' is not an Active stage. Valid stages are {}"
)
raise MlflowException(msg_tpl.format(stage, DEFAULT_STAGES_FOR_GET_LATEST_VERSIONS))
with self.ManagedSessionMaker() as session:
last_updated_time = get_current_time_millis()
model_versions = []
if archive_existing_versions:
conditions = [
SqlModelVersion.name == name,
SqlModelVersion.version != version,
SqlModelVersion.current_stage == get_canonical_stage(stage),
]
model_versions = session.query(SqlModelVersion).filter(*conditions).all()
for mv in model_versions:
mv.current_stage = STAGE_ARCHIVED
mv.last_updated_time = last_updated_time
sql_model_version = self._get_sql_model_version(
session=session, name=name, version=version
)
sql_model_version.current_stage = get_canonical_stage(stage)
sql_model_version.last_updated_time = last_updated_time
sql_registered_model = sql_model_version.registered_model
sql_registered_model.last_updated_time = last_updated_time
session.add_all([*model_versions, sql_model_version, sql_registered_model])
return self._populate_model_version_aliases(
session, name, sql_model_version.to_mlflow_entity()
)
def delete_model_version(self, name, version):
"""
Delete model version in backend.
Args:
name: Registered model name.
version: Registered model version.
Returns:
None
"""
# currently delete model version still keeps the tags associated with the version
with self.ManagedSessionMaker() as session:
updated_time = get_current_time_millis()
sql_model_version = self._get_sql_model_version(session, name, version)
sql_registered_model = sql_model_version.registered_model
sql_registered_model.last_updated_time = updated_time
aliases = sql_registered_model.registered_model_aliases
for alias in aliases:
if alias.version == version:
session.delete(alias)
sql_model_version.current_stage = STAGE_DELETED_INTERNAL
sql_model_version.last_updated_time = updated_time
sql_model_version.description = None
sql_model_version.user_id = None
sql_model_version.source = "REDACTED-SOURCE-PATH"
sql_model_version.run_id = "REDACTED-RUN-ID"
sql_model_version.run_link = "REDACTED-RUN-LINK"
sql_model_version.status_message = None
session.add_all([sql_registered_model, sql_model_version])
def get_model_version(self, name, version):
"""
Get the model version instance by name and version.
Args:
name: Registered model name.
version: Registered model version.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
with self.ManagedSessionMaker() as session:
sql_model_version = self._get_sql_model_version(session, name, version, eager=True)
return self._populate_model_version_aliases(
session, name, sql_model_version.to_mlflow_entity()
)
def get_model_version_download_uri(self, name, version):
"""
Get the download location in Model Registry for this model version.
NOTE: For first version of Model Registry, since the models are not copied over to another
location, download URI points to input source path.
Args:
name: Registered model name.
version: Registered model version.
Returns:
A single URI location that allows reads for downloading.
"""
with self.ManagedSessionMaker() as session:
sql_model_version = self._get_sql_model_version(session, name, version)
return sql_model_version.storage_location or sql_model_version.source
    def search_model_versions(
        self,
        filter_string=None,
        max_results=SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,
        order_by=None,
        page_token=None,
    ):
        """
        Search for model versions in backend that satisfy the filter criteria.

        Args:
            filter_string: A filter string expression. Currently supports a single filter
                condition either name of model like ``name = 'model_name'`` or
                ``run_id = '...'``.
            max_results: Maximum number of model versions desired.
            order_by: List of column names with ASC|DESC annotation, to be used for ordering
                matching search results.
            page_token: Token specifying the next page of results. It should be obtained from
                a ``search_model_versions`` call.

        Returns:
            A PagedList of :py:class:`mlflow.entities.model_registry.ModelVersion`
            objects that satisfy the search expressions. The pagination token for the next
            page can be obtained via the ``token`` attribute of the object.

        Raises:
            MlflowException: If ``max_results`` is not a positive integer or exceeds
                the allowed threshold.
        """
        if not isinstance(max_results, int) or max_results < 1:
            raise MlflowException(
                "Invalid value for max_results. It must be a positive integer,"
                f" but got {max_results}",
                INVALID_PARAMETER_VALUE,
            )
        if max_results > SEARCH_MODEL_VERSION_MAX_RESULTS_THRESHOLD:
            raise MlflowException(
                "Invalid value for request parameter max_results. It must be at most "
                f"{SEARCH_MODEL_VERSION_MAX_RESULTS_THRESHOLD}, but got value {max_results}",
                INVALID_PARAMETER_VALUE,
            )
        parsed_filters = SearchModelVersionUtils.parse_search_filter(filter_string)
        filter_query = self._get_search_model_versions_filter_clauses(
            parsed_filters, self.engine.dialect.name
        )
        # Default ordering: most recently updated first, then name, then newest version.
        parsed_orderby = self._parse_search_model_versions_order_by(
            order_by or ["last_updated_timestamp DESC", "name ASC", "version_number DESC"]
        )
        offset = SearchUtils.parse_start_offset_from_page_token(page_token)
        # we query for max_results + 1 items to check whether there is another page to return.
        # this remediates having to make another query which returns no items.
        max_results_for_query = max_results + 1
        with self.ManagedSessionMaker() as session:
            query = (
                filter_query.options(*self._get_eager_model_version_query_options())
                .filter(SqlModelVersion.current_stage != STAGE_DELETED_INTERNAL)
                .order_by(*parsed_orderby)
                .limit(max_results_for_query)
            )
            if page_token:
                query = query.offset(offset)
            sql_model_versions = session.execute(query).scalars(SqlModelVersion).all()
            # A full page (max_results + 1 rows) means more results exist beyond this page.
            next_page_token = self._compute_next_token(
                max_results_for_query, len(sql_model_versions), offset, max_results
            )
            # Trim the sentinel extra row before converting to entities.
            model_versions = [mv.to_mlflow_entity() for mv in sql_model_versions][:max_results]
            return PagedList(model_versions, next_page_token)
@classmethod
def _parse_search_model_versions_order_by(cls, order_by_list):
"""Sorts a set of model versions based on their natural ordering and an overriding set
of order_bys. Model versions are naturally ordered first by name ascending, then by
version ascending.
"""
clauses = []
observed_order_by_clauses = set()
if order_by_list:
for order_by_clause in order_by_list:
(
_,
key,
ascending,
) = SearchModelVersionUtils.parse_order_by_for_search_model_versions(
order_by_clause
)
if key not in SearchModelVersionUtils.VALID_ORDER_BY_ATTRIBUTE_KEYS:
raise MlflowException(
f"Invalid order by key '{key}' specified. "
"Valid keys are "
f"{SearchModelVersionUtils.VALID_ORDER_BY_ATTRIBUTE_KEYS}",
error_code=INVALID_PARAMETER_VALUE,
)
else:
if key == "version_number":
field = SqlModelVersion.version
elif key == "creation_timestamp":
field = SqlModelVersion.creation_time
elif key == "last_updated_timestamp":
field = SqlModelVersion.last_updated_time
else:
field = getattr(SqlModelVersion, key)
if field.key in observed_order_by_clauses:
raise MlflowException(f"`order_by` contains duplicate fields: {order_by_list}")
observed_order_by_clauses.add(field.key)
if ascending:
clauses.append(field.asc())
else:
clauses.append(field.desc())
if SqlModelVersion.name.key not in observed_order_by_clauses:
clauses.append(SqlModelVersion.name.asc())
if SqlModelVersion.version.key not in observed_order_by_clauses:
clauses.append(SqlModelVersion.version.desc())
return clauses
@classmethod
def _get_model_version_tag(cls, session, name, version, key):
tags = (
session.query(SqlModelVersionTag)
.filter(
SqlModelVersionTag.name == name,
SqlModelVersionTag.version == version,
SqlModelVersionTag.key == key,
)
.all()
)
if len(tags) == 0:
return None
if len(tags) > 1:
raise MlflowException(
f"Expected only 1 model version tag with name={name}, version={version}, "
f"key={key}. Found {len(tags)}.",
INVALID_STATE,
)
return tags[0]
def set_model_version_tag(self, name, version, tag):
"""
Set a tag for the model version.
Args:
name: Registered model name.
version: Registered model version.
tag: :py:class:`mlflow.entities.model_registry.ModelVersionTag` instance to log.
Returns:
None
"""
_validate_model_name(name)
_validate_model_version(version)
_validate_model_version_tag(tag.key, tag.value)
with self.ManagedSessionMaker() as session:
# check if model version exists
self._get_sql_model_version(session, name, version)
session.merge(
SqlModelVersionTag(name=name, version=version, key=tag.key, value=tag.value)
)
def delete_model_version_tag(self, name, version, key):
"""
Delete a tag associated with the model version.
Args:
name: Registered model name.
version: Registered model version.
key: Tag key.
Returns:
None
"""
_validate_model_name(name)
_validate_model_version(version)
_validate_tag_name(key)
with self.ManagedSessionMaker() as session:
# check if model version exists
self._get_sql_model_version(session, name, version)
existing_tag = self._get_model_version_tag(session, name, version, key)
if existing_tag is not None:
session.delete(existing_tag)
@classmethod
def _get_registered_model_alias(cls, session, name, alias):
return (
session.query(SqlRegisteredModelAlias)
.filter(
SqlRegisteredModelAlias.name == name,
SqlRegisteredModelAlias.alias == alias,
)
.first()
)
def set_registered_model_alias(self, name, alias, version):
"""
Set a registered model alias pointing to a model version.
Args:
name: Registered model name.
alias: Name of the alias.
version: Registered model version number.
Returns:
None
"""
_validate_model_name(name)
_validate_model_alias_name(alias)
_validate_model_alias_name_reserved(alias)
_validate_model_version(version)
with self.ManagedSessionMaker() as session:
# check if model version exists
self._get_sql_model_version(session, name, version)
session.merge(SqlRegisteredModelAlias(name=name, alias=alias, version=version))
def delete_registered_model_alias(self, name, alias):
"""
Delete an alias associated with a registered model.
Args:
name: Registered model name.
alias: Name of the alias.
Returns:
None
"""
_validate_model_name(name)
_validate_model_alias_name(alias)
with self.ManagedSessionMaker() as session:
# check if registered model exists
self._get_registered_model(session, name)
existing_alias = self._get_registered_model_alias(session, name, alias)
if existing_alias is not None:
session.delete(existing_alias)
def get_model_version_by_alias(self, name, alias):
"""
Get the model version instance by name and alias.
Args:
name: Registered model name.
alias: Name of the alias.
Returns:
A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
"""
_validate_model_name(name)
_validate_model_alias_name(alias)
if alias.lower() == _REGISTERED_MODEL_ALIAS_LATEST:
if versions := self.get_latest_versions(name):
return versions[0]
else:
raise MlflowException(
f"Latest version not found for model {name}.", RESOURCE_DOES_NOT_EXIST
)
with self.ManagedSessionMaker() as session:
existing_alias = self._get_registered_model_alias(session, name, alias)
if existing_alias is not None:
sql_model_version = self._get_sql_model_version(
session, existing_alias.name, existing_alias.version
)
return self._populate_model_version_aliases(
session, name, sql_model_version.to_mlflow_entity()
)
else:
raise MlflowException(
f"Registered model alias {alias} not found.", INVALID_PARAMETER_VALUE
)
def _await_model_version_creation(self, mv, await_creation_for):
"""
Does not wait for the model version to become READY as a successful creation will
immediately place the model version in a READY state.
"""
# Webhook CRUD operations
def create_webhook(
self,
name: str,
url: str,
events: list[WebhookEvent],
description: str | None = None,
secret: str | None = None,
status: WebhookStatus | None = None,
) -> Webhook:
_validate_webhook_name(name)
_validate_webhook_url(url)
_validate_webhook_events(events)
with self.ManagedSessionMaker() as session:
webhook_id = str(uuid.uuid4())
creation_time = get_current_time_millis()
webhook = SqlWebhook(
webhook_id=webhook_id,
name=name,
url=url,
description=description,
secret=secret,
status=(status or WebhookStatus.ACTIVE).value,
creation_timestamp=creation_time,
last_updated_timestamp=creation_time,
)
session.add(webhook)
session.add_all(
SqlWebhookEvent(webhook_id=webhook_id, entity=e.entity.value, action=e.action.value)
for e in events
)
session.flush()
return webhook.to_mlflow_entity()
def get_webhook(self, webhook_id: str) -> Webhook:
with self.ManagedSessionMaker() as session:
webhook = self._get_webhook_by_id(session, webhook_id)
return webhook.to_mlflow_entity()
def list_webhooks(
self,
max_results: int | None = None,
page_token: str | None = None,
) -> PagedList[Webhook]:
max_results = max_results or 100
if max_results < 1 or max_results > 1000:
raise MlflowException(
"max_results must be between 1 and 1000.", INVALID_PARAMETER_VALUE
)
offset = SearchUtils.parse_start_offset_from_page_token(page_token)
with self.ManagedSessionMaker() as session:
query = (
session.query(SqlWebhook)
.filter(SqlWebhook.deleted_timestamp.is_(None))
.order_by(SqlWebhook.creation_timestamp.desc())
.limit(max_results + 1)
)
if page_token:
query = query.offset(offset)
webhooks = query.all()
# Check if there's a next page
has_next_page = len(webhooks) > max_results
next_page_token = None
if has_next_page:
webhooks = webhooks[:max_results]
next_page_token = SearchUtils.create_page_token(offset + max_results)
return PagedList([w.to_mlflow_entity() for w in webhooks], next_page_token)
def list_webhooks_by_event(
self,
event: WebhookEvent,
max_results: int | None = None,
page_token: str | None = None,
) -> PagedList[Webhook]:
max_results = max_results or 100
if max_results < 1 or max_results > 1000:
raise MlflowException(
"max_results must be between 1 and 1000.", INVALID_PARAMETER_VALUE
)
offset = SearchUtils.parse_start_offset_from_page_token(page_token)
with self.ManagedSessionMaker() as session:
# Query webhooks that have the specific event in their related webhook_events
query = (
session.query(SqlWebhook)
.join(SqlWebhookEvent)
.filter(SqlWebhook.deleted_timestamp.is_(None))
.filter(SqlWebhookEvent.entity == event.entity.value)
.filter(SqlWebhookEvent.action == event.action.value)
.order_by(SqlWebhook.creation_timestamp.desc())
.limit(max_results + 1)
)
if page_token:
query = query.offset(offset)
webhooks = query.all()
# Check if there's a next page
has_next_page = len(webhooks) > max_results
next_page_token = None
if has_next_page:
webhooks = webhooks[:max_results]
next_page_token = SearchUtils.create_page_token(offset + max_results)
return PagedList([w.to_mlflow_entity() for w in webhooks], next_page_token)
def update_webhook(
self,
webhook_id: str,
name: str | None = None,
description: str | None = None,
url: str | None = None,
events: list[WebhookEvent] | None = None,
secret: str | None = None,
status: WebhookStatus | None = None,
) -> Webhook:
with self.ManagedSessionMaker() as session:
webhook = self._get_webhook_by_id(session, webhook_id)
# Update fields if provided
if name is not None:
_validate_webhook_name(name)
webhook.name = name
if url is not None:
_validate_webhook_url(url)
webhook.url = url
if events is not None:
_validate_webhook_events(events)
# Delete existing webhook events
session.query(SqlWebhookEvent).filter(
SqlWebhookEvent.webhook_id == webhook_id
).delete()
# Create new webhook events
session.add_all(
SqlWebhookEvent(
webhook_id=webhook_id, entity=e.entity.value, action=e.action.value
)
for e in events
)
if description is not None:
webhook.description = description
if secret is not None:
webhook.secret = secret
if status is not None:
webhook.status = status.value
webhook.last_updated_timestamp = get_current_time_millis()
session.add(webhook)
session.flush()
return webhook.to_mlflow_entity()
def delete_webhook(self, webhook_id: str) -> None:
with self.ManagedSessionMaker() as session:
webhook = self._get_webhook_by_id(session, webhook_id)
# Soft delete by setting deleted_timestamp
webhook.deleted_timestamp = get_current_time_millis()
webhook.last_updated_timestamp = webhook.deleted_timestamp
session.add(webhook)
session.flush()
# Helper methods for webhooks
def _get_webhook_by_id(self, session: Session, webhook_id: str) -> SqlWebhook:
if webhook := (
session.query(SqlWebhook)
.filter(
SqlWebhook.webhook_id == webhook_id,
SqlWebhook.deleted_timestamp.is_(None),
)
.first()
):
return webhook
raise MlflowException(f"Webhook with ID {webhook_id} not found.", RESOURCE_DOES_NOT_EXIST)
| SqlAlchemyStore |
python | tensorflow__tensorflow | tensorflow/python/eager/monitoring.py | {
"start": 10839,
"end": 11656
} | class ____(Metric):
"""A stateful class for updating a gauge-like bool metric.
This class encapsulates a set of boolean values (or a single value for a
label-less metric). Each value is identified by a tuple of labels. The class
allows the user to set each value.
"""
__slots__ = []
def __init__(self, name, description, *labels):
"""Creates a new BoolGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods,
len(labels), name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return BoolGaugeCell(super(BoolGauge, self).get_cell(*labels))
| BoolGauge |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_itertools.py | {
"start": 101723,
"end": 110485
} | class ____(__TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_batched(self):
s = 'abcde'
r = [('a', 'b'), ('c', 'd'), ('e',)]
n = 2
for g in (G, I, Ig, L, R):
with self.subTest(g=g):
self.assertEqual(list(batched(g(s), n)), r)
self.assertEqual(list(batched(S(s), 2)), [])
self.assertRaises(TypeError, batched, X(s), 2)
self.assertRaises(TypeError, batched, N(s), 2)
self.assertRaises(ZeroDivisionError, list, batched(E(s), 2))
self.assertRaises(ZeroDivisionError, list, batched(E2(s), 4))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_pairwise(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
seq = list(g(s))
expected = list(zip(seq, seq[1:]))
actual = list(pairwise(g(s)))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, pairwise, X(s))
self.assertRaises(TypeError, pairwise, N(s))
self.assertRaises(ZeroDivisionError, list, pairwise(E(s)))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
| TestVariousIteratorArgs |
python | python__mypy | mypy/traverser.py | {
"start": 29016,
"end": 29414
} | class ____(FuncCollectorBase):
def __init__(self) -> None:
super().__init__()
self.return_statements: list[ReturnStmt] = []
def visit_return_stmt(self, stmt: ReturnStmt) -> None:
self.return_statements.append(stmt)
def all_return_statements(node: Node) -> list[ReturnStmt]:
v = ReturnCollector()
node.accept(v)
return v.return_statements
| ReturnCollector |
python | django__django | tests/queries/models.py | {
"start": 16331,
"end": 16525
} | class ____(models.Model):
employer = models.ForeignKey(Company, models.CASCADE)
employee = models.ForeignKey(Person, models.CASCADE)
title = models.CharField(max_length=128)
| Employment |
python | geekcomputers__Python | rearrange-files/rearrange-files.py | {
"start": 139,
"end": 1416
} | class ____(object):
def __init__(self):
self.folder_path = os.getcwd()
self.list_of_all_files = os.listdir(self.folder_path)
def make_folder_and_return_name(self, foldername):
if os.path.exists(foldername) is False:
os.mkdir(foldername)
else:
foldername = foldername + str(2)
os.mkdir(foldername)
return foldername
def check_folder_existance(self):
for i in range(len(self.list_of_all_files)):
if self.list_of_all_files[i].endswith(".pdf"):
if os.path.exists("pdfs"):
shutil.move(
self.folder_path + "/" + self.list_of_all_files[i],
self.folder_path + "/pdfs",
)
else:
os.mkdir("pdfs")
elif self.list_of_all_files[i].endswith("jpg"):
if os.path.exists("jpgs"):
shutil.move(
self.folder_path + "/" + self.list_of_all_files[i],
self.folder_path + "/jpgs",
)
else:
os.mkdir("jpgs")
if __name__ == "__main__":
re = RearrangeFile()
re.check_folder_existance()
| RearrangeFile |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 36023,
"end": 36124
} | class ____(BaseModel, extra="forbid"):
exp: "Expression" = Field(..., description="")
| ExpExpression |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_match_strftime_format.py | {
"start": 432,
"end": 2832
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.match_strftime_format"
condition_value_keys = ("strftime_format",)
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, strftime_format, **kwargs):
def is_parseable_by_format(val):
try:
datetime.strptime(val, strftime_format) # noqa: DTZ007 # FIXME CoP
return True
except TypeError:
raise TypeError( # noqa: TRY003 # FIXME CoP
"Values passed to expect_column_values_to_match_strftime_format must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format." # noqa: E501 # FIXME CoP
)
except ValueError:
return False
return column.map(is_parseable_by_format)
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, strftime_format, **kwargs):
# Below is a simple validation that the provided format can both format and parse a datetime object. # noqa: E501 # FIXME CoP
# %D is an example of a format that can format but not parse, e.g.
try:
datetime.strptime( # noqa: DTZ007 # FIXME CoP
datetime.strftime(datetime.now(), strftime_format), # noqa: DTZ005 # FIXME CoP
strftime_format,
)
except ValueError as e:
raise ValueError(f"Unable to use provided strftime_format: {e!s}") # noqa: TRY003 # FIXME CoP
def is_parseable_by_format(val):
if val is None:
return False
try:
datetime.strptime(val, strftime_format) # noqa: DTZ007 # FIXME CoP
return True
except TypeError:
raise TypeError( # noqa: TRY003 # FIXME CoP
"Values passed to expect_column_values_to_match_strftime_format must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format." # noqa: E501 # FIXME CoP
)
except ValueError:
return False
success_udf = F.udf(is_parseable_by_format, pyspark.types.BooleanType())
return success_udf(column)
| ColumnValuesMatchStrftimeFormat |
python | encode__django-rest-framework | tests/test_versioning.py | {
"start": 2118,
"end": 4933
} | class ____:
def test_unversioned(self):
view = RequestVersionView.as_view()
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_query_param_versioning(self):
scheme = versioning.QueryParameterVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/?version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
@override_settings(ALLOWED_HOSTS=['*'])
def test_host_name_versioning(self):
scheme = versioning.HostNameVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_HOST='v1.example.org')
response = view(request)
assert response.data == {'version': 'v1'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_accept_header_versioning(self):
scheme = versioning.AcceptHeaderVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/', HTTP_ACCEPT='*/*; version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/', HTTP_ACCEPT='application/json')
response = view(request)
assert response.data == {'version': None}
def test_url_path_versioning(self):
scheme = versioning.URLPathVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/1.2.3/endpoint/')
response = view(request, version='1.2.3')
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_namespace_versioning(self):
class FakeResolverMatch(ResolverMatch):
namespace = 'v1'
scheme = versioning.NamespaceVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/v1/endpoint/')
request.resolver_match = FakeResolverMatch
response = view(request, version='v1')
assert response.data == {'version': 'v1'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
| TestRequestVersion |
python | pallets__click | src/click/shell_completion.py | {
"start": 12319,
"end": 20994
} | class ____(ShellComplete):
"""Shell completion for Fish."""
name = "fish"
source_template = _SOURCE_FISH
def get_completion_args(self) -> tuple[list[str], str]:
cwords = split_arg_string(os.environ["COMP_WORDS"])
incomplete = os.environ["COMP_CWORD"]
if incomplete:
incomplete = split_arg_string(incomplete)[0]
args = cwords[1:]
# Fish stores the partial word in both COMP_WORDS and
# COMP_CWORD, remove it from complete args.
if incomplete and args and args[-1] == incomplete:
args.pop()
return args, incomplete
def format_completion(self, item: CompletionItem) -> str:
if item.help:
return f"{item.type},{item.value}\t{item.help}"
return f"{item.type},{item.value}"
ShellCompleteType = t.TypeVar("ShellCompleteType", bound="type[ShellComplete]")
_available_shells: dict[str, type[ShellComplete]] = {
"bash": BashComplete,
"fish": FishComplete,
"zsh": ZshComplete,
}
def add_completion_class(
cls: ShellCompleteType, name: str | None = None
) -> ShellCompleteType:
"""Register a :class:`ShellComplete` subclass under the given name.
The name will be provided by the completion instruction environment
variable during completion.
:param cls: The completion class that will handle completion for the
shell.
:param name: Name to register the class under. Defaults to the
class's ``name`` attribute.
"""
if name is None:
name = cls.name
_available_shells[name] = cls
return cls
def get_completion_class(shell: str) -> type[ShellComplete] | None:
"""Look up a registered :class:`ShellComplete` subclass by the name
provided by the completion instruction environment variable. If the
name isn't registered, returns ``None``.
:param shell: Name the class is registered under.
"""
return _available_shells.get(shell)
def split_arg_string(string: str) -> list[str]:
"""Split an argument string as with :func:`shlex.split`, but don't
fail if the string is incomplete. Ignores a missing closing quote or
incomplete escape sequence and uses the partial token as-is.
.. code-block:: python
split_arg_string("example 'my file")
["example", "my file"]
split_arg_string("example my\\")
["example", "my"]
:param string: String to split.
.. versionchanged:: 8.2
Moved to ``shell_completion`` from ``parser``.
"""
import shlex
lex = shlex.shlex(string, posix=True)
lex.whitespace_split = True
lex.commenters = ""
out = []
try:
for token in lex:
out.append(token)
except ValueError:
# Raised when end-of-string is reached in an invalid state. Use
# the partial token as-is. The quote or escape character is in
# lex.state, not lex.token.
out.append(lex.token)
return out
def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool:
"""Determine if the given parameter is an argument that can still
accept values.
:param ctx: Invocation context for the command represented by the
parsed complete args.
:param param: Argument object being checked.
"""
if not isinstance(param, Argument):
return False
assert param.name is not None
# Will be None if expose_value is False.
value = ctx.params.get(param.name)
return (
param.nargs == -1
or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE
or (
param.nargs > 1
and isinstance(value, (tuple, list))
and len(value) < param.nargs
)
)
def _start_of_option(ctx: Context, value: str) -> bool:
"""Check if the value looks like the start of an option."""
if not value:
return False
c = value[0]
return c in ctx._opt_prefixes
def _is_incomplete_option(ctx: Context, args: list[str], param: Parameter) -> bool:
"""Determine if the given parameter is an option that needs a value.
:param args: List of complete args before the incomplete value.
:param param: Option object being checked.
"""
if not isinstance(param, Option):
return False
if param.is_flag or param.count:
return False
last_option = None
for index, arg in enumerate(reversed(args)):
if index + 1 > param.nargs:
break
if _start_of_option(ctx, arg):
last_option = arg
break
return last_option is not None and last_option in param.opts
def _resolve_context(
cli: Command,
ctx_args: cabc.MutableMapping[str, t.Any],
prog_name: str,
args: list[str],
) -> Context:
"""Produce the context hierarchy starting with the command and
traversing the complete arguments. This only follows the commands,
it doesn't trigger input prompts or callbacks.
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param args: List of complete args before the incomplete value.
"""
ctx_args["resilient_parsing"] = True
with cli.make_context(prog_name, args.copy(), **ctx_args) as ctx:
args = ctx._protected_args + ctx.args
while args:
command = ctx.command
if isinstance(command, Group):
if not command.chain:
name, cmd, args = command.resolve_command(ctx, args)
if cmd is None:
return ctx
with cmd.make_context(
name, args, parent=ctx, resilient_parsing=True
) as sub_ctx:
ctx = sub_ctx
args = ctx._protected_args + ctx.args
else:
sub_ctx = ctx
while args:
name, cmd, args = command.resolve_command(ctx, args)
if cmd is None:
return ctx
with cmd.make_context(
name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
resilient_parsing=True,
) as sub_sub_ctx:
sub_ctx = sub_sub_ctx
args = sub_ctx.args
ctx = sub_ctx
args = [*sub_ctx._protected_args, *sub_ctx.args]
else:
break
return ctx
def _resolve_incomplete(
ctx: Context, args: list[str], incomplete: str
) -> tuple[Command | Parameter, str]:
"""Find the Click object that will handle the completion of the
incomplete value. Return the object and the incomplete value.
:param ctx: Invocation context for the command represented by
the parsed complete args.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
# Different shells treat an "=" between a long option name and
# value differently. Might keep the value joined, return the "="
# as a separate item, or return the split name and value. Always
# split and discard the "=" to make completion easier.
if incomplete == "=":
incomplete = ""
elif "=" in incomplete and _start_of_option(ctx, incomplete):
name, _, incomplete = incomplete.partition("=")
args.append(name)
# The "--" marker tells Click to stop treating values as options
# even if they start with the option character. If it hasn't been
# given and the incomplete arg looks like an option, the current
# command will provide option name completions.
if "--" not in args and _start_of_option(ctx, incomplete):
return ctx.command, incomplete
params = ctx.command.get_params(ctx)
# If the last complete arg is an option name with an incomplete
# value, the option will provide value completions.
for param in params:
if _is_incomplete_option(ctx, args, param):
return param, incomplete
# It's not an option name or value. The first argument without a
# parsed value will provide value completions.
for param in params:
if _is_incomplete_argument(ctx, param):
return param, incomplete
# There were no unparsed arguments, the command may be a group that
# will provide command name completions.
return ctx.command, incomplete
| FishComplete |
python | google__pytype | pytype/rewrite/load_abstract_test.py | {
"start": 166,
"end": 400
} | class ____(test_utils.ContextfulTestBase):
def test_basic(self):
module_globals = self.ctx.abstract_loader.get_module_globals()
# Sanity check a random entry.
self.assertIn('__name__', module_globals)
| GetModuleGlobalsTest |
python | Delgan__loguru | tests/exceptions/source/diagnose/parenthesis.py | {
"start": 149,
"end": 626
} | class ____:
pass
def a(b, c):
x = XYZ()
x.val = 9
(a, b, x.val, ) = 12, 15 / c, 17
def b():
foo, bar, baz = {}, XYZ, 0
foo[("baz")] = bar() + (a(5, baz))
def c():
x = XYZ()
x.val = 123
x.val += 456 and b()
def d(j):
x, y, z = 2, 5, 3
xyz = XYZ()
xyz.val = 123
i = 12 \
; z = (x * y); y = (j or xyz.val * c() \
+ 3)
def e():
a = 1
(5 \
) + d(()) + a
with logger.catch():
e()
| XYZ |
python | django__django | tests/user_commands/tests.py | {
"start": 20672,
"end": 23125
} | class ____(SimpleTestCase):
def test_no_existent_external_program(self):
msg = "Error executing a_42_command_that_doesnt_exist_42"
with self.assertRaisesMessage(CommandError, msg):
popen_wrapper(["a_42_command_that_doesnt_exist_42"])
def test_get_random_secret_key(self):
key = get_random_secret_key()
self.assertEqual(len(key), 50)
for char in key:
self.assertIn(char, "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
def test_is_ignored_path_true(self):
patterns = (
["foo/bar/baz"],
["baz"],
["foo/bar/baz"],
["*/baz"],
["*"],
["b?z"],
["[abc]az"],
["*/ba[!z]/baz"],
)
for ignore_patterns in patterns:
with self.subTest(ignore_patterns=ignore_patterns):
self.assertIs(
is_ignored_path("foo/bar/baz", ignore_patterns=ignore_patterns),
True,
)
def test_is_ignored_path_false(self):
self.assertIs(
is_ignored_path(
"foo/bar/baz", ignore_patterns=["foo/bar/bat", "bar", "flub/blub"]
),
False,
)
def test_normalize_path_patterns_truncates_wildcard_base(self):
expected = [os.path.normcase(p) for p in ["foo/bar", "bar/*/"]]
self.assertEqual(normalize_path_patterns(["foo/bar/*", "bar/*/"]), expected)
def test_run_formatters_handles_oserror_for_black_path(self):
test_files_path = Path(__file__).parent / "test_files"
cases = [
(
FileNotFoundError,
str(test_files_path / "nonexistent"),
),
(
OSError if sys.platform == "win32" else PermissionError,
str(test_files_path / "black"),
),
]
for exception, location in cases:
with (
self.subTest(exception.__qualname__),
AssertFormatterFailureCaughtContext(
self, shutil_which_result=location
) as ctx,
):
run_formatters([], stderr=ctx.stderr)
parsed_error = ctx.stderr.getvalue()
self.assertIn(exception.__qualname__, parsed_error)
if sys.platform != "win32":
self.assertIn(location, parsed_error)
| UtilsTests |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/assets.py | {
"start": 1887,
"end": 1951
} | class ____(Check):
category = CheckCategory.ASSETS
| AssetsCheck |
python | pypa__pip | src/pip/_internal/models/direct_url.py | {
"start": 4421,
"end": 6555
} | class ____:
url: str
info: InfoType
subdirectory: str | None = None
def _remove_auth_from_netloc(self, netloc: str) -> str:
if "@" not in netloc:
return netloc
user_pass, netloc_no_user_pass = netloc.split("@", 1)
if (
isinstance(self.info, VcsInfo)
and self.info.vcs == "git"
and user_pass == "git"
):
return netloc
if ENV_VAR_RE.match(user_pass):
return netloc
return netloc_no_user_pass
@property
def redacted_url(self) -> str:
"""url with user:password part removed unless it is formed with
environment variables as specified in PEP 610, or it is ``git``
in the case of a git URL.
"""
purl = urllib.parse.urlsplit(self.url)
netloc = self._remove_auth_from_netloc(purl.netloc)
surl = urllib.parse.urlunsplit(
(purl.scheme, netloc, purl.path, purl.query, purl.fragment)
)
return surl
def validate(self) -> None:
self.from_dict(self.to_dict())
@classmethod
def from_dict(cls, d: dict[str, Any]) -> DirectUrl:
return DirectUrl(
url=_get_required(d, str, "url"),
subdirectory=_get(d, str, "subdirectory"),
info=_exactly_one_of(
[
ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
DirInfo._from_dict(_get(d, dict, "dir_info")),
VcsInfo._from_dict(_get(d, dict, "vcs_info")),
]
),
)
def to_dict(self) -> dict[str, Any]:
res = _filter_none(
url=self.redacted_url,
subdirectory=self.subdirectory,
)
res[self.info.name] = self.info._to_dict()
return res
@classmethod
def from_json(cls, s: str) -> DirectUrl:
return cls.from_dict(json.loads(s))
def to_json(self) -> str:
return json.dumps(self.to_dict(), sort_keys=True)
def is_local_editable(self) -> bool:
return isinstance(self.info, DirInfo) and self.info.editable
| DirectUrl |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_rule_details.py | {
"start": 4281,
"end": 6226
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-rule-details"
def setUp(self) -> None:
self.rule = self.create_project_rule(project=self.project)
self.environment = self.create_environment(self.project, name="production")
self.slack_integration = install_slack(organization=self.organization)
with assume_test_silo_mode(SiloMode.CONTROL):
self.jira_integration = self.create_provider_integration(
provider="jira", name="Jira", external_id="jira:1"
)
self.jira_integration.add_organization(self.organization, self.user)
self.jira_server_integration = self.create_provider_integration(
provider="jira_server", name="Jira Server", external_id="jira_server:1"
)
self.jira_server_integration.add_organization(self.organization, self.user)
self.sentry_app = self.create_sentry_app(
name="Pied Piper",
organization=self.organization,
schema={"elements": [self.create_alert_rule_action_schema()]},
)
self.sentry_app_installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization
)
self.sentry_app_settings_payload = [
{"name": "title", "value": "Team Rocket"},
{"name": "summary", "value": "We're blasting off again."},
]
self.login_as(self.user)
self.notify_issue_owners_action = [
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
}
]
self.first_seen_condition = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
}
]
| ProjectRuleDetailsBaseTestCase |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 56923,
"end": 58938
} | class ____:
def test_gompertz_accuracy(self):
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
# sfx is sf(x). The values were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 100
# def gompertz_sf(x, c):
# return mp.exp(-c*mp.expm1(x))
#
# E.g.
#
# >>> float(gompertz_sf(1, 2.5))
# 0.013626967146253437
#
@pytest.mark.parametrize('x, c, sfx', [(1, 2.5, 0.013626967146253437),
(3, 2.5, 1.8973243273704087e-21),
(0.05, 5, 0.7738668242570479),
(2.25, 5, 3.707795833465481e-19)])
def test_sf_isf(self, x, c, sfx):
assert_allclose(stats.gompertz.sf(x, c), sfx, rtol=1e-14)
assert_allclose(stats.gompertz.isf(sfx, c), x, rtol=1e-14)
def test_logcdf(self):
x = 8.0
c = 0.1
# Reference value computed with mpmath.
ref = -3.820049516821143e-130
logcdf = stats.gompertz.logcdf(x, c)
assert_allclose(logcdf, ref, rtol=5e-15)
def test_logsf(self):
x = 3e-80
c = 12
# Reference value computed with mpmath.
ref = -3.6e-79
logsf = stats.gompertz.logsf(x, c)
assert_allclose(logsf, ref, rtol=5e-15)
# reference values were computed with mpmath
# from mpmath import mp
# mp.dps = 100
# def gompertz_entropy(c):
# c = mp.mpf(c)
# return float(mp.one - mp.log(c) - mp.exp(c)*mp.e1(c))
@pytest.mark.parametrize('c, ref', [(1e-4, 1.5762523017634573),
(1, 0.4036526376768059),
(1000, -5.908754280976161),
(1e10, -22.025850930040455)])
def test_entropy(self, c, ref):
assert_allclose(stats.gompertz.entropy(c), ref, rtol=1e-14)
| TestGompertz |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 22377,
"end": 26327
} | class ____(MinMaxObserver):
r"""Observer module for computing the quantization parameters based on the
moving average of the min and max values.
This observer computes the quantization parameters based on the moving
averages of minimums and maximums of the incoming tensors. The module
records the average minimum and maximum of incoming tensors, and uses this
statistic to compute the quantization parameters.
Args:
averaging_constant: Averaging constant for min/max.
dtype: dtype argument to the `quantize` node needed to implement the
reference model spec.
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
The moving average min/max is computed as follows
.. math::
\begin{array}{ll}
x_\text{min} = \begin{cases}
\min(X) & \text{if~}x_\text{min} = \text{None} \\
(1 - c) x_\text{min} + c \min(X) & \text{otherwise}
\end{cases}\\
x_\text{max} = \begin{cases}
\max(X) & \text{if~}x_\text{max} = \text{None} \\
(1 - c) x_\text{max} + c \max(X) & \text{otherwise}
\end{cases}\\
\end{array}
where :math:`x_\text{min/max}` is the running average min/max, :math:`X` is
is the incoming tensor, and :math:`c` is the ``averaging_constant``.
The scale and zero point are then computed as in
:class:`~torch.ao.quantization.observer.MinMaxObserver`.
.. note:: Only works with ``torch.per_tensor_affine`` quantization scheme.
.. note:: If the running minimum equals to the running maximum, the scale
and zero_point are set to 1.0 and 0.
"""
def __init__(
self,
averaging_constant=0.01,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
eps=torch.finfo(torch.float32).eps,
is_dynamic=False,
**kwargs,
) -> None:
if not is_per_tensor(qscheme):
raise NotImplementedError(
f"MovingAverageMinMaxObserver's qscheme only support \
torch.per_tensor_symmetric and torch.per_tensor_affine. \
but got: {qscheme}"
)
self.averaging_constant = averaging_constant
if is_dynamic and self.averaging_constant != 1:
raise NotImplementedError(
"MovingAverageMinMaxObserver doesn't support dynamic quantization for "
f"averaging constant of {self.averaging_constant}"
)
super().__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
eps=eps,
is_dynamic=is_dynamic,
**kwargs,
)
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val = self.min_val
max_val = self.max_val
if min_val == float("inf") and max_val == float("-inf"):
min_val, max_val = torch.aminmax(x)
else:
min_val_cur, max_val_cur = torch.aminmax(x)
min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
| MovingAverageMinMaxObserver |
python | PyCQA__pylint | tests/functional/a/assigning/assigning_non_slot.py | {
"start": 2587,
"end": 2666
} | class ____:
def __get__(self, inst, cls):
return 42
| NonDataDescriptor |
python | pytorch__pytorch | test/onnx/exporter/test_small_models_e2e.py | {
"start": 25767,
"end": 29982
} | class ____(common_utils.TestCase, _WithExport):
def test_group_norm_opset_21(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.group_norm(x, 4)
x = torch.randn(1, 4, 4, 4, dtype=torch.float32)
onnx_program = self.export(Model(), (x,), opset_version=21)
# TODO(after ort support): As of ONNX Runtime 1.22, the operator is not implemented yet.
# call assert_onnx_program after ort support
self.assertIn(
"GroupNormalization",
[node.op_type for node in onnx_program.model.graph],
)
def test_attention_opset_23(self):
class Model(torch.nn.Module):
def forward(self, query, key, value):
return torch.nn.functional.scaled_dot_product_attention(
query, key, value
)
query = torch.rand(32, 8, 128, 64, dtype=torch.float16)
key = torch.rand(32, 8, 128, 64, dtype=torch.float16)
value = torch.rand(32, 8, 128, 64, dtype=torch.float16)
onnx_program = self.export(Model(), (query, key, value), opset_version=23)
self.assertEqual(["Attention"], [n.op_type for n in onnx_program.model.graph])
onnx_testing.assert_onnx_program(onnx_program, atol=1e-2, rtol=1)
def test_rms_norm(self):
"""Test RMS normalization with various configurations."""
class RMSNormModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.rms_norm(x, [3])
x = torch.randn(2, 5, 3)
onnx_program = self.export(RMSNormModel(), (x,), opset_version=23)
onnx_testing.assert_onnx_program(onnx_program)
# Test with multi-dimensional normalized_shape
class RMSNormModel2D(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.rms_norm(x, [7, 3])
x = torch.randn(2, 5, 7, 3)
onnx_program = self.export(RMSNormModel2D(), (x,), opset_version=23)
onnx_testing.assert_onnx_program(onnx_program)
def test_rms_norm_with_weight(self):
"""Test RMS normalization with weight parameter."""
class RMSNormWithWeight(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.ones(3))
def forward(self, x):
return torch.nn.functional.rms_norm(x, [3], weight=self.weight)
x = torch.randn(2, 5, 3)
onnx_program = self.export(RMSNormWithWeight(), (x,), opset_version=23)
onnx_testing.assert_onnx_program(onnx_program)
def test_rms_norm_with_eps(self):
"""Test RMS normalization with custom epsilon."""
class RMSNormWithEps(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.rms_norm(x, [3], eps=1e-5)
x = torch.randn(2, 5, 3)
onnx_program = self.export(RMSNormWithEps(), (x,), opset_version=23)
onnx_testing.assert_onnx_program(onnx_program)
def test_enable_gqa_in_attention_23_with_dropout(self):
class Model(torch.nn.Module):
def forward(self, q, k, v):
return torch.nn.functional.scaled_dot_product_attention( # pylint: disable=not-callable
q, k, v, enable_gqa=True, dropout_p=0.1
)
model = Model()
query = torch.randn(2, 4, 8, 16)
key = torch.randn(2, 2, 8, 16)
value = torch.randn(2, 2, 8, 16)
onnx_program = self.export(
model,
(
query,
key,
value,
),
opset_version=23,
)
# opset23 only uses manually gqa path when dropout is enabled,
# and dropout makes the output non-deterministic,
# so we check for the presence of the ops used in that path.
all_ops = [node.op_type for node in onnx_program.model.graph]
self.assertIn("Unsqueeze", all_ops)
self.assertIn("Expand", all_ops)
self.assertIn("Reshape", all_ops)
if __name__ == "__main__":
common_utils.run_tests()
| DynamoExporterNewOpsetsTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_with.py | {
"start": 8416,
"end": 10663
} | class ____(__TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
| NonexceptionalTestCase |
python | wandb__wandb | wandb/vendor/pygments/lexers/graphics.py | {
"start": 3117,
"end": 7092
} | class ____(RegexLexer):
"""
Lexer for PostScript files.
The PostScript Language Reference published by Adobe at
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
.. versionadded:: 1.4
"""
name = 'PostScript'
aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
delimiter = r'()<>\[\]{}/%\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
tokens = {
'root': [
# All comment types
(r'^%!.+\n', Comment.Preproc),
(r'%%.*\n', Comment.Special),
(r'(^%.*\n){2,}', Comment.Multiline),
(r'%.*\n', Comment.Single),
# String literals are awkward; enter separate state.
(r'\(', String, 'stringliteral'),
(r'[{}<>\[\]]', Punctuation),
# Numbers
(r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
# Slight abuse: use Oct to signify any explicit base system
(r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
(r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
+ delimiter_end, Number.Float),
(r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
# References
(r'\/%s' % valid_name, Name.Variable),
# Names
(valid_name, Name.Function), # Anything else is executed
# These keywords taken from
# <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
# Is there an authoritative list anywhere that doesn't involve
# trawling documentation?
(r'(false|true)' + delimiter_end, Keyword.Constant),
# Conditionals / flow control
(r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
+ delimiter_end, Keyword.Reserved),
(words((
'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
'transform', 'translate', 'truncate', 'typecheck', 'undefined',
'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
Name.Builtin),
(r'\s+', Text),
],
'stringliteral': [
(r'[^()\\]+', String),
(r'\\', String.Escape, 'escape'),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'escape': [
(r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
default('#pop'),
],
}
| PostScriptLexer |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 5648,
"end": 5933
} | class ____(LocationValueError):
"""Raised when get_host or similar fails to parse the URL input."""
def __init__(self, location: str) -> None:
message = f"Failed to parse: {location}"
super().__init__(message)
self.location = location
| LocationParseError |
python | numpy__numpy | numpy/lib/_iotools.py | {
"start": 13155,
"end": 13437
} | class ____(UserWarning):
"""
Warning issued when a string converter has a problem.
Notes
-----
In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
is explicitly suppressed with the "invalid_raise" keyword.
"""
pass
| ConversionWarning |
python | kamyu104__LeetCode-Solutions | Python/word-search-ii.py | {
"start": 522,
"end": 1950
} | class ____(object):
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
visited = [[False for j in xrange(len(board[0]))] for i in xrange(len(board))]
result = {}
trie = TrieNode()
for word in words:
trie.insert(word)
for i in xrange(len(board)):
for j in xrange(len(board[0])):
self.findWordsRecu(board, trie, 0, i, j, visited, [], result)
return result.keys()
def findWordsRecu(self, board, trie, cur, i, j, visited, cur_word, result):
if not trie or i < 0 or i >= len(board) or j < 0 or j >= len(board[0]) or visited[i][j]:
return
if board[i][j] not in trie.leaves:
return
cur_word.append(board[i][j])
next_node = trie.leaves[board[i][j]]
if next_node.is_string:
result["".join(cur_word)] = True
visited[i][j] = True
self.findWordsRecu(board, next_node, cur + 1, i + 1, j, visited, cur_word, result)
self.findWordsRecu(board, next_node, cur + 1, i - 1, j, visited, cur_word, result)
self.findWordsRecu(board, next_node, cur + 1, i, j + 1, visited, cur_word, result)
self.findWordsRecu(board, next_node, cur + 1, i, j - 1, visited, cur_word, result)
visited[i][j] = False
cur_word.pop()
| Solution |
python | conda__conda | tests/plugins/test_env_specs.py | {
"start": 824,
"end": 1220
} | class ____(EnvironmentSpecBase):
extensions = {".random"}
def __init__(self, filename: str):
self.filename = filename
def can_handle(self):
for ext in RandomSpec.extensions:
if self.filename.endswith(ext):
return True
return False
def env(self):
return Environment(prefix="/somewhere", platform=["linux-64"])
| RandomSpec |
python | graphql-python__graphene | graphene/types/tests/test_subscribe_async.py | {
"start": 202,
"end": 2136
} | class ____(ObjectType):
count_to_ten = Field(Int)
async def subscribe_count_to_ten(root, info):
for count in range(1, 11):
yield count
schema = Schema(query=Query, subscription=Subscription)
@mark.asyncio
async def test_subscription():
subscription = "subscription { countToTen }"
result = await schema.subscribe(subscription)
count = 0
async for item in result:
count = item.data["countToTen"]
assert count == 10
@mark.asyncio
async def test_subscription_fails_with_invalid_query():
# It fails if the provided query is invalid
subscription = "subscription { "
result = await schema.subscribe(subscription)
assert not result.data
assert result.errors
assert "Syntax Error: Expected Name, found <EOF>" in str(result.errors[0])
@mark.asyncio
async def test_subscription_fails_when_query_is_not_valid():
# It can't subscribe to two fields at the same time, triggering a
# validation error.
subscription = "subscription { countToTen, b: countToTen }"
result = await schema.subscribe(subscription)
assert not result.data
assert result.errors
assert "Anonymous Subscription must select only one top level field." in str(
result.errors[0]
)
@mark.asyncio
async def test_subscription_with_args():
class Query(ObjectType):
hello = String()
class Subscription(ObjectType):
count_upwards = Field(Int, limit=Int(required=True))
async def subscribe_count_upwards(root, info, limit):
count = 0
while count < limit:
count += 1
yield count
schema = Schema(query=Query, subscription=Subscription)
subscription = "subscription { countUpwards(limit: 5) }"
result = await schema.subscribe(subscription)
count = 0
async for item in result:
count = item.data["countUpwards"]
assert count == 5
| Subscription |
python | ansible__ansible | test/units/parsing/utils/test_addresses.py | {
"start": 138,
"end": 3863
} | class ____(unittest.TestCase):
tests = {
# IPv4 addresses
'192.0.2.3': ['192.0.2.3', None],
'192.0.2.3:23': ['192.0.2.3', 23],
# IPv6 addresses
'::': ['::', None],
'::1': ['::1', None],
'[::1]:442': ['::1', 442],
'abcd:ef98:7654:3210:abcd:ef98:7654:3210': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', None],
'[abcd:ef98:7654:3210:abcd:ef98:7654:3210]:42': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', 42],
'1234:5678:9abc:def0:1234:5678:9abc:def0': ['1234:5678:9abc:def0:1234:5678:9abc:def0', None],
'1234::9abc:def0:1234:5678:9abc:def0': ['1234::9abc:def0:1234:5678:9abc:def0', None],
'1234:5678::def0:1234:5678:9abc:def0': ['1234:5678::def0:1234:5678:9abc:def0', None],
'1234:5678:9abc::1234:5678:9abc:def0': ['1234:5678:9abc::1234:5678:9abc:def0', None],
'1234:5678:9abc:def0::5678:9abc:def0': ['1234:5678:9abc:def0::5678:9abc:def0', None],
'1234:5678:9abc:def0:1234::9abc:def0': ['1234:5678:9abc:def0:1234::9abc:def0', None],
'1234:5678:9abc:def0:1234:5678::def0': ['1234:5678:9abc:def0:1234:5678::def0', None],
'1234:5678:9abc:def0:1234:5678::': ['1234:5678:9abc:def0:1234:5678::', None],
'::9abc:def0:1234:5678:9abc:def0': ['::9abc:def0:1234:5678:9abc:def0', None],
'0:0:0:0:0:ffff:1.2.3.4': ['0:0:0:0:0:ffff:1.2.3.4', None],
'0:0:0:0:0:0:1.2.3.4': ['0:0:0:0:0:0:1.2.3.4', None],
'::ffff:1.2.3.4': ['::ffff:1.2.3.4', None],
'::1.2.3.4': ['::1.2.3.4', None],
'1234::': ['1234::', None],
# Invalid IPv6 address
'1234::9abc:def0:1234:5678:9abc::::::::def0': [None, None],
# Hostnames
'some-host': ['some-host', None],
'some-host:80': ['some-host', 80],
'some.host.com:492': ['some.host.com', 492],
'[some.host.com]:493': ['some.host.com', 493],
'a-b.3foo_bar.com:23': ['a-b.3foo_bar.com', 23],
u'fóöbär': [u'fóöbär', None],
u'fóöbär:32': [u'fóöbär', 32],
u'fóöbär.éxàmplê.com:632': [u'fóöbär.éxàmplê.com', 632],
# Various errors
'': [None, None],
'some..host': [None, None],
'some.': [None, None],
'[example.com]': [None, None],
'some-': [None, None],
'some-.foo.com': [None, None],
'some.-foo.com': [None, None],
}
range_tests = {
'192.0.2.[3:10]': ['192.0.2.[3:10]', None],
'192.0.2.[3:10]:23': ['192.0.2.[3:10]', 23],
'abcd:ef98::7654:[1:9]': ['abcd:ef98::7654:[1:9]', None],
'[abcd:ef98::7654:[6:32]]:2222': ['abcd:ef98::7654:[6:32]', 2222],
'[abcd:ef98::7654:[9ab3:fcb7]]:2222': ['abcd:ef98::7654:[9ab3:fcb7]', 2222],
u'fóöb[a:c]r.éxàmplê.com:632': [u'fóöb[a:c]r.éxàmplê.com', 632],
'[a:b]foo.com': ['[a:b]foo.com', None],
'foo[a:b].com': ['foo[a:b].com', None],
'foo[a:b]:42': ['foo[a:b]', 42],
'foo[a-b]-.com': [None, None],
'foo[a-b]:32': [None, None],
'foo[x-y]': [None, None],
}
def test_without_ranges(self):
for t in self.tests:
test = self.tests[t]
try:
(host, port) = parse_address(t)
except Exception:
host = None
port = None
assert host == test[0]
assert port == test[1]
def test_with_ranges(self):
for t in self.range_tests:
test = self.range_tests[t]
try:
(host, port) = parse_address(t, allow_ranges=True)
except Exception:
host = None
port = None
assert host == test[0]
assert port == test[1]
| TestParseAddress |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 1280,
"end": 1338
} | class ____(TypedDict):
my_var: int | str
| CustomTypedDict3 |
python | getsentry__sentry | src/sentry/relay/globalconfig.py | {
"start": 1385,
"end": 3157
} | class ____(TypedDict, total=False):
measurements: MeasurementsConfig
aiModelCosts: AIModelCosts | None
aiOperationTypeMap: AIOperationTypeMap
metricExtraction: MetricExtractionGroups
filters: GenericFiltersConfig | None
spanOpDefaults: SpanOpDefaults
options: dict[str, Any]
def get_global_generic_filters() -> GenericFiltersConfig:
return {
"version": 1,
"filters": [],
}
def span_op_defaults() -> SpanOpDefaults:
return {
"rules": [
{
# If span.data[messaging.system] is set, use span.op "message":
"condition": {
"op": "not",
"inner": {
"op": "eq",
"name": "span.data.messaging\\.system",
"value": None,
},
},
"value": "message",
}
]
}
@metrics.wraps("relay.globalconfig.get")
def get_global_config() -> GlobalConfig:
"""Return the global configuration for Relay."""
global_config: GlobalConfig = {
"measurements": get_measurements_config(),
"aiModelCosts": ai_model_costs_config(),
"aiOperationTypeMap": ai_operation_type_map_config(),
"metricExtraction": global_metric_extraction_groups(),
"spanOpDefaults": span_op_defaults(),
}
filters = get_global_generic_filters()
if filters and len(filters["filters"]) > 0:
global_config["filters"] = filters
options = dict()
for option in RELAY_OPTIONS:
if (value := sentry.options.get(option)) is not None:
options[option] = value
if options:
global_config["options"] = options
return global_config
| GlobalConfig |
python | altair-viz__altair | tools/markup.py | {
"start": 1941,
"end": 3336
} | class ____(_Markdown):
"""
Minor extension to support partial `ast`_ conversion.
Only need to convert the docstring tokens to `.rst`.
.. _ast:
https://mistune.lepture.com/en/latest/guide.html#abstract-syntax-tree
"""
def __init__(
self,
renderer: BaseRenderer | Literal["ast"] | None,
block: BlockParser | None = None,
inline: _InlineParser | None = None,
plugins=None,
) -> None:
if renderer == "ast":
renderer = None
super().__init__(renderer, block, inline, plugins)
def __call__(self, s: str) -> str:
r = super().__call__(s)
if isinstance(r, str):
return unescape(r).replace(r"\ ,", ",").replace(r"\ ", " ")
msg = f"Expected `str` but got {type(r).__name__!r}"
raise TypeError(msg)
def render_tokens(self, tokens: Iterable[Token], /) -> str:
"""
Render ast tokens originating from another parser.
Parameters
----------
tokens
All tokens will be rendered into a single `.rst` string
"""
if self.renderer is None:
msg = "Unable to render tokens without a renderer."
raise TypeError(msg)
state = self.block.state_cls()
s = self.renderer(self._iter_render(tokens, state), state)
return mistune.util.unescape(s)
| RSTParse |
python | python__mypy | mypyc/analysis/dataflow.py | {
"start": 8577,
"end": 11518
} | class ____(BaseAnalysisVisitor[Value]):
"""Visitor for finding defined registers.
Note that this only deals with registers and not temporaries, on
the assumption that we never access temporaries when they might be
undefined.
If strict_errors is True, then we regard any use of LoadErrorValue
as making a register undefined. Otherwise we only do if
`undefines` is set on the error value.
This lets us only consider the things we care about during
uninitialized variable checking while capturing all possibly
undefined things for refcounting.
"""
def __init__(self, strict_errors: bool = False) -> None:
self.strict_errors = strict_errors
def visit_branch(self, op: Branch) -> GenAndKill[Value]:
return set(), set()
def visit_return(self, op: Return) -> GenAndKill[Value]:
return set(), set()
def visit_unreachable(self, op: Unreachable) -> GenAndKill[Value]:
return set(), set()
def visit_register_op(self, op: RegisterOp) -> GenAndKill[Value]:
return set(), set()
def visit_assign(self, op: Assign) -> GenAndKill[Value]:
# Loading an error value may undefine the register.
if isinstance(op.src, LoadErrorValue) and (op.src.undefines or self.strict_errors):
return set(), {op.dest}
else:
return {op.dest}, set()
def visit_assign_multi(self, op: AssignMulti) -> GenAndKill[Value]:
# Array registers are special and we don't track the definedness of them.
return set(), set()
def visit_set_mem(self, op: SetMem) -> GenAndKill[Value]:
return set(), set()
def analyze_maybe_defined_regs(
blocks: list[BasicBlock], cfg: CFG, initial_defined: set[Value]
) -> AnalysisResult[Value]:
"""Calculate potentially defined registers at each CFG location.
A register is defined if it has a value along some path from the initial location.
"""
return run_analysis(
blocks=blocks,
cfg=cfg,
gen_and_kill=DefinedVisitor(),
initial=initial_defined,
backward=False,
kind=MAYBE_ANALYSIS,
)
def analyze_must_defined_regs(
blocks: list[BasicBlock],
cfg: CFG,
initial_defined: set[Value],
regs: Iterable[Value],
strict_errors: bool = False,
) -> AnalysisResult[Value]:
"""Calculate always defined registers at each CFG location.
This analysis can work before exception insertion, since it is a
sound assumption that registers defined in a block might not be
initialized in its error handler.
A register is defined if it has a value along all paths from the
initial location.
"""
return run_analysis(
blocks=blocks,
cfg=cfg,
gen_and_kill=DefinedVisitor(strict_errors=strict_errors),
initial=initial_defined,
backward=False,
kind=MUST_ANALYSIS,
universe=set(regs),
)
| DefinedVisitor |
python | jazzband__django-simple-history | simple_history/tests/tests/utils.py | {
"start": 1273,
"end": 2126
} | class ____:
def db_for_read(self, model, **hints):
if model._meta.app_label == "external":
return OTHER_DB_NAME
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == "external":
return OTHER_DB_NAME
return None
def allow_relation(self, obj1, obj2, **hints):
if obj1._meta.app_label == "external" and obj2._meta.app_label == "external":
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
if app_label == "external":
return db == OTHER_DB_NAME
elif db == OTHER_DB_NAME:
return False
else:
return None
database_router_override_settings = {
"DATABASE_ROUTERS": ["simple_history.tests.tests.utils.TestDbRouter"]
}
| TestDbRouter |
python | facelessuser__pymdown-extensions | pymdownx/escapeall.py | {
"start": 2403,
"end": 3486
} | class ____(Extension):
"""Extension that allows you to escape everything."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'hardbreak': [
False,
"Turn escaped newlines to hardbreaks - Default: False"
],
'nbsp': [
False,
"Turn escaped spaces to non-breaking spaces - Default: False"
]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Escape all."""
config = self.getConfigs()
hardbreak = config['hardbreak']
md.inlinePatterns.register(
EscapeAllPattern(ESCAPE_NO_NL_RE if hardbreak else ESCAPE_RE, config['nbsp'], md),
"escape",
180
)
if config['hardbreak']:
md.inlinePatterns.register(SubstituteTagInlineProcessor(HARDBREAK_RE, 'br'), "hardbreak", 5.1)
def makeExtension(*args, **kwargs):
"""Return extension."""
return EscapeAllExtension(*args, **kwargs)
| EscapeAllExtension |
python | simonw__datasette | datasette/cli.py | {
"start": 1377,
"end": 25932
} | class ____(CompositeParamType):
name = "setting"
arity = 2
def convert(self, config, param, ctx):
name, value = config
if name in DEFAULT_SETTINGS:
# For backwards compatibility with how this worked prior to
# Datasette 1.0, we turn bare setting names into setting.name
# Type checking for those older settings
default = DEFAULT_SETTINGS[name]
name = "settings.{}".format(name)
if isinstance(default, bool):
try:
return name, "true" if value_as_boolean(value) else "false"
except ValueAsBooleanError:
self.fail(f'"{name}" should be on/off/true/false/1/0', param, ctx)
elif isinstance(default, int):
if not value.isdigit():
self.fail(f'"{name}" should be an integer', param, ctx)
return name, value
elif isinstance(default, str):
return name, value
else:
# Should never happen:
self.fail("Invalid option")
return name, value
def sqlite_extensions(fn):
fn = click.option(
"sqlite_extensions",
"--load-extension",
type=LoadExtension(),
envvar="DATASETTE_LOAD_EXTENSION",
multiple=True,
help="Path to a SQLite extension to load, and optional entrypoint",
)(fn)
# Wrap it in a custom error handler
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except AttributeError as e:
if "enable_load_extension" in str(e):
raise click.ClickException(
textwrap.dedent(
"""
Your Python installation does not have the ability to load SQLite extensions.
More information: https://datasette.io/help/extensions
"""
).strip()
)
raise
return wrapped
@click.group(cls=DefaultGroup, default="serve", default_if_no_args=True)
@click.version_option(version=__version__)
def cli():
"""
Datasette is an open source multi-tool for exploring and publishing data
\b
About Datasette: https://datasette.io/
Full documentation: https://docs.datasette.io/
"""
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
@click.option("--inspect-file", default="-")
@sqlite_extensions
def inspect(files, inspect_file, sqlite_extensions):
"""
Generate JSON summary of provided database files
This can then be passed to "datasette --inspect-file" to speed up count
operations against immutable database files.
"""
inspect_data = run_sync(lambda: inspect_(files, sqlite_extensions))
if inspect_file == "-":
sys.stdout.write(json.dumps(inspect_data, indent=2))
else:
with open(inspect_file, "w") as fp:
fp.write(json.dumps(inspect_data, indent=2))
async def inspect_(files, sqlite_extensions):
app = Datasette([], immutables=files, sqlite_extensions=sqlite_extensions)
data = {}
for name, database in app.databases.items():
counts = await database.table_counts(limit=3600 * 1000)
data[name] = {
"hash": database.hash,
"size": database.size,
"file": database.path,
"tables": {
table_name: {"count": table_count}
for table_name, table_count in counts.items()
},
}
return data
@cli.group()
def publish():
"""Publish specified SQLite database files to the internet along with a Datasette-powered interface and API"""
pass
# Register publish plugins
pm.hook.publish_subcommand(publish=publish)
@cli.command()
@click.option("--all", help="Include built-in default plugins", is_flag=True)
@click.option(
"--requirements", help="Output requirements.txt of installed plugins", is_flag=True
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
def plugins(all, requirements, plugins_dir):
"""List currently installed plugins"""
app = Datasette([], plugins_dir=plugins_dir)
if requirements:
for plugin in app._plugins():
if plugin["version"]:
click.echo("{}=={}".format(plugin["name"], plugin["version"]))
else:
click.echo(json.dumps(app._plugins(all=all), indent=4))
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
"-t",
"--tag",
help="Name for the resulting Docker container, can optionally use name:tag format",
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON/YAML file containing metadata to publish",
)
@click.option("--extra-options", help="Extra options to pass to datasette serve")
@click.option("--branch", help="Install datasette from a GitHub branch e.g. main")
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="Serve static files from this directory at /MOUNT/...",
multiple=True,
)
@click.option(
"--install", help="Additional packages (e.g. plugins) to install", multiple=True
)
@click.option("--spatialite", is_flag=True, help="Enable SpatialLite extension")
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option(
"--secret",
help="Secret used for signing secure values, such as signed cookies",
envvar="DATASETTE_PUBLISH_SECRET",
default=lambda: os.urandom(32).hex(),
)
@click.option(
"-p",
"--port",
default=8001,
type=click.IntRange(1, 65535),
help="Port to run the server on, defaults to 8001",
)
@click.option("--title", help="Title for metadata")
@click.option("--license", help="License label for metadata")
@click.option("--license_url", help="License URL for metadata")
@click.option("--source", help="Source label for metadata")
@click.option("--source_url", help="Source URL for metadata")
@click.option("--about", help="About label for metadata")
@click.option("--about_url", help="About URL for metadata")
def package(
files,
tag,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
secret,
port,
**extra_metadata,
):
"""Package SQLite files into a Datasette Docker container"""
if not shutil.which("docker"):
click.secho(
' The package command requires "docker" to be installed and configured ',
bg="red",
fg="white",
bold=True,
err=True,
)
sys.exit(1)
with temporary_docker_directory(
files,
"datasette",
metadata=metadata,
extra_options=extra_options,
branch=branch,
template_dir=template_dir,
plugins_dir=plugins_dir,
static=static,
install=install,
spatialite=spatialite,
version_note=version_note,
secret=secret,
extra_metadata=extra_metadata,
port=port,
):
args = ["docker", "build"]
if tag:
args.append("-t")
args.append(tag)
args.append(".")
call(args)
@cli.command()
@click.argument("packages", nargs=-1)
@click.option(
"-U", "--upgrade", is_flag=True, help="Upgrade packages to latest version"
)
@click.option(
"-r",
"--requirement",
type=click.Path(exists=True),
help="Install from requirements file",
)
@click.option(
"-e",
"--editable",
help="Install a project in editable mode from this path",
)
def install(packages, upgrade, requirement, editable):
"""Install plugins and packages from PyPI into the same environment as Datasette"""
if not packages and not requirement and not editable:
raise click.UsageError("Please specify at least one package to install")
args = ["pip", "install"]
if upgrade:
args += ["--upgrade"]
if editable:
args += ["--editable", editable]
if requirement:
args += ["-r", requirement]
args += list(packages)
sys.argv = args
run_module("pip", run_name="__main__")
@cli.command()
@click.argument("packages", nargs=-1, required=True)
@click.option("-y", "--yes", is_flag=True, help="Don't ask for confirmation")
def uninstall(packages, yes):
"""Uninstall plugins and Python packages from the Datasette environment"""
sys.argv = ["pip", "uninstall"] + list(packages) + (["-y"] if yes else [])
run_module("pip", run_name="__main__")
@cli.command()
@click.argument("files", type=click.Path(), nargs=-1)
@click.option(
"-i",
"--immutable",
type=click.Path(exists=True),
help="Database files to open in immutable mode",
multiple=True,
)
@click.option(
"-h",
"--host",
default="127.0.0.1",
help=(
"Host for server. Defaults to 127.0.0.1 which means only connections "
"from the local machine will be allowed. Use 0.0.0.0 to listen to "
"all IPs and allow access from other machines."
),
)
@click.option(
"-p",
"--port",
default=8001,
type=click.IntRange(0, 65535),
help="Port for server, defaults to 8001. Use -p 0 to automatically assign an available port.",
)
@click.option(
"--uds",
help="Bind to a Unix domain socket",
)
@click.option(
"--reload",
is_flag=True,
help="Automatically reload if code or metadata change detected - useful for development",
)
@click.option(
"--cors", is_flag=True, help="Enable CORS by serving Access-Control-Allow-Origin: *"
)
@sqlite_extensions
@click.option(
"--inspect-file", help='Path to JSON file created using "datasette inspect"'
)
@click.option(
"-m",
"--metadata",
type=click.File(mode="r"),
help="Path to JSON/YAML file containing license/source metadata",
)
@click.option(
"--template-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom templates",
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
@click.option(
"--static",
type=StaticMount(),
help="Serve static files from this directory at /MOUNT/...",
multiple=True,
)
@click.option("--memory", is_flag=True, help="Make /_memory database available")
@click.option(
"-c",
"--config",
type=click.File(mode="r"),
help="Path to JSON/YAML Datasette configuration file",
)
@click.option(
"-s",
"--setting",
"settings",
type=Setting(),
help="nested.key, value setting to use in Datasette configuration",
multiple=True,
)
@click.option(
"--secret",
help="Secret used for signing secure values, such as signed cookies",
envvar="DATASETTE_SECRET",
)
@click.option(
"--root",
help="Output URL that sets a cookie authenticating the root user",
is_flag=True,
)
@click.option(
"--default-deny",
help="Deny all permissions by default",
is_flag=True,
)
@click.option(
"--get",
help="Run an HTTP GET request against this path, print results and exit",
)
@click.option(
"--headers",
is_flag=True,
help="Include HTTP headers in --get output",
)
@click.option(
"--token",
help="API token to send with --get requests",
)
@click.option(
"--actor",
help="Actor to use for --get requests (JSON string)",
)
@click.option("--version-note", help="Additional note to show on /-/versions")
@click.option("--help-settings", is_flag=True, help="Show available settings")
@click.option("--pdb", is_flag=True, help="Launch debugger on any errors")
@click.option(
"-o",
"--open",
"open_browser",
is_flag=True,
help="Open Datasette in your web browser",
)
@click.option(
"--create",
is_flag=True,
help="Create database files if they do not exist",
)
@click.option(
"--crossdb",
is_flag=True,
help="Enable cross-database joins using the /_memory database",
)
@click.option(
"--nolock",
is_flag=True,
help="Ignore locking, open locked files in read-only mode",
)
@click.option(
"--ssl-keyfile",
help="SSL key file",
envvar="DATASETTE_SSL_KEYFILE",
)
@click.option(
"--ssl-certfile",
help="SSL certificate file",
envvar="DATASETTE_SSL_CERTFILE",
)
@click.option(
"--internal",
type=click.Path(),
help="Path to a persistent Datasette internal SQLite database",
)
def serve(
files,
immutable,
host,
port,
uds,
reload,
cors,
sqlite_extensions,
inspect_file,
metadata,
template_dir,
plugins_dir,
static,
memory,
config,
settings,
secret,
root,
default_deny,
get,
headers,
token,
actor,
version_note,
help_settings,
pdb,
open_browser,
create,
crossdb,
nolock,
ssl_keyfile,
ssl_certfile,
internal,
return_instance=False,
):
"""Serve up specified SQLite database files with a web UI"""
if help_settings:
formatter = formatting.HelpFormatter()
with formatter.section("Settings"):
formatter.write_dl(
[
(option.name, f"{option.help} (default={option.default})")
for option in SETTINGS
]
)
click.echo(formatter.getvalue())
sys.exit(0)
if reload:
import hupper
reloader = hupper.start_reloader("datasette.cli.serve")
if immutable:
reloader.watch_files(immutable)
if config:
reloader.watch_files([config.name])
if metadata:
reloader.watch_files([metadata.name])
inspect_data = None
if inspect_file:
with open(inspect_file) as fp:
inspect_data = json.load(fp)
metadata_data = None
if metadata:
metadata_data = parse_metadata(metadata.read())
config_data = None
if config:
config_data = parse_metadata(config.read())
config_data = config_data or {}
# Merge in settings from -s/--setting
if settings:
settings_updates = pairs_to_nested_config(settings)
# Merge recursively, to avoid over-writing nested values
# https://github.com/simonw/datasette/issues/2389
deep_dict_update(config_data, settings_updates)
kwargs = dict(
immutables=immutable,
cache_headers=not reload,
cors=cors,
inspect_data=inspect_data,
config=config_data,
metadata=metadata_data,
sqlite_extensions=sqlite_extensions,
template_dir=template_dir,
plugins_dir=plugins_dir,
static_mounts=static,
settings=None, # These are passed in config= now
memory=memory,
secret=secret,
version_note=version_note,
pdb=pdb,
crossdb=crossdb,
nolock=nolock,
internal=internal,
default_deny=default_deny,
)
# Separate directories from files
directories = [f for f in files if os.path.isdir(f)]
file_paths = [f for f in files if not os.path.isdir(f)]
# Handle config_dir - only one directory allowed
if len(directories) > 1:
raise click.ClickException(
"Cannot pass multiple directories. Pass a single directory as config_dir."
)
elif len(directories) == 1:
kwargs["config_dir"] = pathlib.Path(directories[0])
# Verify list of files, create if needed (and --create)
for file in file_paths:
if not pathlib.Path(file).exists():
if create:
sqlite3.connect(file).execute("vacuum")
else:
raise click.ClickException(
"Invalid value for '[FILES]...': Path '{}' does not exist.".format(
file
)
)
# Check for duplicate files by resolving all paths to their absolute forms
# Collect all database files that will be loaded (explicit files + config_dir files)
all_db_files = []
# Add explicit files
for file in file_paths:
all_db_files.append((file, pathlib.Path(file).resolve()))
# Add config_dir databases if config_dir is set
if "config_dir" in kwargs:
config_dir = kwargs["config_dir"]
for ext in ("db", "sqlite", "sqlite3"):
for db_file in config_dir.glob(f"*.{ext}"):
all_db_files.append((str(db_file), db_file.resolve()))
# Check for duplicates
seen = {}
for original_path, resolved_path in all_db_files:
if resolved_path in seen:
raise click.ClickException(
f"Duplicate database file: '{original_path}' and '{seen[resolved_path]}' "
f"both refer to {resolved_path}"
)
seen[resolved_path] = original_path
files = file_paths
try:
ds = Datasette(files, **kwargs)
except SpatialiteNotFound:
raise click.ClickException("Could not find SpatiaLite extension")
except StartupError as e:
raise click.ClickException(e.args[0])
if return_instance:
# Private utility mechanism for writing unit tests
return ds
# Run the "startup" plugin hooks
run_sync(ds.invoke_startup)
# Run async soundness checks - but only if we're not under pytest
run_sync(lambda: check_databases(ds))
if headers and not get:
raise click.ClickException("--headers can only be used with --get")
if token and not get:
raise click.ClickException("--token can only be used with --get")
if get:
client = TestClient(ds)
request_headers = {}
if token:
request_headers["Authorization"] = "Bearer {}".format(token)
cookies = {}
if actor:
cookies["ds_actor"] = client.actor_cookie(json.loads(actor))
response = client.get(get, headers=request_headers, cookies=cookies)
if headers:
# Output HTTP status code, headers, two newlines, then the response body
click.echo(f"HTTP/1.1 {response.status}")
for key, value in response.headers.items():
click.echo(f"{key}: {value}")
if response.text:
click.echo()
click.echo(response.text)
else:
click.echo(response.text)
exit_code = 0 if response.status == 200 else 1
sys.exit(exit_code)
return
# Start the server
url = None
if root:
ds.root_enabled = True
url = "http://{}:{}{}?token={}".format(
host, port, ds.urls.path("-/auth-token"), ds._root_token
)
click.echo(url)
if open_browser:
if url is None:
# Figure out most convenient URL - to table, database or homepage
path = run_sync(lambda: initial_path_for_datasette(ds))
url = f"http://{host}:{port}{path}"
webbrowser.open(url)
uvicorn_kwargs = dict(
host=host, port=port, log_level="info", lifespan="on", workers=1
)
if uds:
uvicorn_kwargs["uds"] = uds
if ssl_keyfile:
uvicorn_kwargs["ssl_keyfile"] = ssl_keyfile
if ssl_certfile:
uvicorn_kwargs["ssl_certfile"] = ssl_certfile
uvicorn.run(ds.app(), **uvicorn_kwargs)
@cli.command()
@click.argument("id")
@click.option(
"--secret",
help="Secret used for signing the API tokens",
envvar="DATASETTE_SECRET",
required=True,
)
@click.option(
"-e",
"--expires-after",
help="Token should expire after this many seconds",
type=int,
)
@click.option(
"alls",
"-a",
"--all",
type=str,
metavar="ACTION",
multiple=True,
help="Restrict token to this action",
)
@click.option(
"databases",
"-d",
"--database",
type=(str, str),
metavar="DB ACTION",
multiple=True,
help="Restrict token to this action on this database",
)
@click.option(
"resources",
"-r",
"--resource",
type=(str, str, str),
metavar="DB RESOURCE ACTION",
multiple=True,
help="Restrict token to this action on this database resource (a table, SQL view or named query)",
)
@click.option(
"--debug",
help="Show decoded token",
is_flag=True,
)
@click.option(
"--plugins-dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
help="Path to directory containing custom plugins",
)
def create_token(
id, secret, expires_after, alls, databases, resources, debug, plugins_dir
):
"""
Create a signed API token for the specified actor ID
Example:
datasette create-token root --secret mysecret
To allow only "view-database-download" for all databases:
\b
datasette create-token root --secret mysecret \\
--all view-database-download
To allow "create-table" against a specific database:
\b
datasette create-token root --secret mysecret \\
--database mydb create-table
To allow "insert-row" against a specific table:
\b
datasette create-token root --secret myscret \\
--resource mydb mytable insert-row
Restricted actions can be specified multiple times using
multiple --all, --database, and --resource options.
Add --debug to see a decoded version of the token.
"""
ds = Datasette(secret=secret, plugins_dir=plugins_dir)
# Run ds.invoke_startup() in an event loop
run_sync(ds.invoke_startup)
# Warn about any unknown actions
actions = []
actions.extend(alls)
actions.extend([p[1] for p in databases])
actions.extend([p[2] for p in resources])
for action in actions:
if not ds.actions.get(action):
click.secho(
f" Unknown permission: {action} ",
fg="red",
err=True,
)
restrict_database = {}
for database, action in databases:
restrict_database.setdefault(database, []).append(action)
restrict_resource = {}
for database, resource, action in resources:
restrict_resource.setdefault(database, {}).setdefault(resource, []).append(
action
)
token = ds.create_token(
id,
expires_after=expires_after,
restrict_all=alls,
restrict_database=restrict_database,
restrict_resource=restrict_resource,
)
click.echo(token)
if debug:
encoded = token[len("dstok_") :]
click.echo("\nDecoded:\n")
click.echo(json.dumps(ds.unsign(encoded, namespace="token"), indent=2))
pm.hook.register_commands(cli=cli)
async def check_databases(ds):
# Run check_connection against every connected database
# to confirm they are all usable
for database in list(ds.databases.values()):
try:
await database.execute_fn(check_connection)
except SpatialiteConnectionProblem:
suggestion = ""
try:
find_spatialite()
suggestion = "\n\nTry adding the --load-extension=spatialite option."
except SpatialiteNotFound:
pass
raise click.UsageError(
"It looks like you're trying to load a SpatiaLite"
+ " database without first loading the SpatiaLite module."
+ suggestion
+ "\n\nRead more: https://docs.datasette.io/en/stable/spatialite.html"
)
except ConnectionProblem as e:
raise click.UsageError(
f"Connection to {database.path} failed check: {str(e.args[0])}"
)
# If --crossdb and more than SQLITE_LIMIT_ATTACHED show warning
if (
ds.crossdb
and len([db for db in ds.databases.values() if not db.is_memory])
> SQLITE_LIMIT_ATTACHED
):
msg = (
"Warning: --crossdb only works with the first {} attached databases".format(
SQLITE_LIMIT_ATTACHED
)
)
click.echo(click.style(msg, bold=True, fg="yellow"), err=True)
| Setting |
python | sympy__sympy | sympy/matrices/exceptions.py | {
"start": 225,
"end": 375
} | class ____(ValueError, MatrixError):
"""The matrix in not invertible (division by multidimensional zero error)."""
pass
| NonInvertibleMatrixError |
python | scipy__scipy | scipy/stats/tests/test_hypotests.py | {
"start": 88480,
"end": 92379
} | class ____:
@pytest.mark.parametrize("c1, n1, c2, n2, p_expect", (
# example from [1], 6. Illustrative examples: Example 1
[0, 100, 3, 100, 0.0884],
[2, 100, 6, 100, 0.1749]
))
def test_paper_examples(self, c1, n1, c2, n2, p_expect):
res = stats.poisson_means_test(c1, n1, c2, n2)
assert_allclose(res.pvalue, p_expect, atol=1e-4)
@pytest.mark.parametrize("c1, n1, c2, n2, p_expect, alt, d", (
# These test cases are produced by the wrapped fortran code from the
# original authors. Using a slightly modified version of this fortran,
# found here, https://github.com/nolanbconaway/poisson-etest,
# additional tests were created.
[20, 10, 20, 10, 0.9999997568929630, 'two-sided', 0],
[10, 10, 10, 10, 0.9999998403241203, 'two-sided', 0],
[50, 15, 1, 1, 0.09920321053409643, 'two-sided', .05],
[3, 100, 20, 300, 0.12202725450896404, 'two-sided', 0],
[3, 12, 4, 20, 0.40416087318539173, 'greater', 0],
[4, 20, 3, 100, 0.008053640402974236, 'greater', 0],
# publishing paper does not include a `less` alternative,
# so it was calculated with switched argument order and
# alternative="greater"
[4, 20, 3, 10, 0.3083216325432898, 'less', 0],
[1, 1, 50, 15, 0.09322998607245102, 'less', 0]
))
def test_fortran_authors(self, c1, n1, c2, n2, p_expect, alt, d):
res = stats.poisson_means_test(c1, n1, c2, n2, alternative=alt, diff=d)
assert_allclose(res.pvalue, p_expect, atol=2e-6, rtol=1e-16)
def test_different_results(self):
# The implementation in Fortran is known to break down at higher
# counts and observations, so we expect different results. By
# inspection we can infer the p-value to be near one.
count1, count2 = 10000, 10000
nobs1, nobs2 = 10000, 10000
res = stats.poisson_means_test(count1, nobs1, count2, nobs2)
assert_allclose(res.pvalue, 1)
def test_less_than_zero_lambda_hat2(self):
# demonstrates behavior that fixes a known fault from original Fortran.
# p-value should clearly be near one.
count1, count2 = 0, 0
nobs1, nobs2 = 1, 1
res = stats.poisson_means_test(count1, nobs1, count2, nobs2)
assert_allclose(res.pvalue, 1)
def test_input_validation(self):
count1, count2 = 0, 0
nobs1, nobs2 = 1, 1
# test non-integral events
message = '`k1` and `k2` must be integers.'
with assert_raises(TypeError, match=message):
stats.poisson_means_test(.7, nobs1, count2, nobs2)
with assert_raises(TypeError, match=message):
stats.poisson_means_test(count1, nobs1, .7, nobs2)
# test negative events
message = '`k1` and `k2` must be greater than or equal to 0.'
with assert_raises(ValueError, match=message):
stats.poisson_means_test(-1, nobs1, count2, nobs2)
with assert_raises(ValueError, match=message):
stats.poisson_means_test(count1, nobs1, -1, nobs2)
# test negative sample size
message = '`n1` and `n2` must be greater than 0.'
with assert_raises(ValueError, match=message):
stats.poisson_means_test(count1, -1, count2, nobs2)
with assert_raises(ValueError, match=message):
stats.poisson_means_test(count1, nobs1, count2, -1)
# test negative difference
message = 'diff must be greater than or equal to 0.'
with assert_raises(ValueError, match=message):
stats.poisson_means_test(count1, nobs1, count2, nobs2, diff=-1)
# test invalid alternative
message = 'Alternative must be one of ...'
with assert_raises(ValueError, match=message):
stats.poisson_means_test(1, 2, 1, 2, alternative='error')
| TestPoissonMeansTest |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 16584,
"end": 18537
} | class ____(nn.Module):
def __init__(self, config: LukeConfig):
super().__init__()
self.config = config
self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
if config.entity_emb_size != config.hidden_size:
self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
entity_ids: torch.LongTensor,
position_ids: torch.LongTensor,
token_type_ids: Optional[torch.LongTensor] = None,
):
if token_type_ids is None:
token_type_ids = torch.zeros_like(entity_ids)
entity_embeddings = self.entity_embeddings(entity_ids)
if self.config.entity_emb_size != self.config.hidden_size:
entity_embeddings = self.entity_embedding_dense(entity_embeddings)
position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
position_embeddings = position_embeddings * position_embedding_mask
position_embeddings = torch.sum(position_embeddings, dim=-2)
position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = entity_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
| LukeEntityEmbeddings |
python | jazzband__tablib | src/tablib/_vendor/dbfpy/header.py | {
"start": 697,
"end": 9385
} | class ____:
"""Dbf header definition.
For more information about dbf header format visit
`http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_STRUCT`
Examples:
Create an empty dbf header and add some field definitions:
dbfh = DbfHeader()
dbfh.addField(("name", "C", 10))
dbfh.addField(("date", "D"))
dbfh.addField(DbfNumericFieldDef("price", 5, 2))
Create a dbf header with field definitions:
dbfh = DbfHeader([
("name", "C", 10),
("date", "D"),
DbfNumericFieldDef("price", 5, 2),
])
"""
__slots__ = ("signature", "fields", "lastUpdate", "recordLength",
"recordCount", "headerLength", "changed", "_ignore_errors")
# instance construction and initialization methods
def __init__(self, fields=None, headerLength=0, recordLength=0,
recordCount=0, signature=0x03, lastUpdate=None, ignoreErrors=False):
"""Initialize instance.
Arguments:
fields:
a list of field definitions;
recordLength:
size of the records;
headerLength:
size of the header;
recordCount:
number of records stored in DBF;
signature:
version number (aka signature). using 0x03 as a default meaning
"File without DBT". for more information about this field visit
``http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_NOTE_1_TARGET``
lastUpdate:
date of the DBF's update. this could be a string ('yymmdd' or
'yyyymmdd'), timestamp (int or float), datetime/date value,
a sequence (assuming (yyyy, mm, dd, ...)) or an object having
callable ``ticks`` field.
ignoreErrors:
error processing mode for DBF fields (boolean)
"""
self.signature = signature
if fields is None:
self.fields = []
else:
self.fields = list(fields)
self.lastUpdate = getDate(lastUpdate)
self.recordLength = recordLength
self.headerLength = headerLength
self.recordCount = recordCount
self.ignoreErrors = ignoreErrors
# XXX: I'm not sure this is safe to
# initialize `self.changed` in this way
self.changed = bool(self.fields)
# @classmethod
def fromString(cls, string):
"""Return header instance from the string object."""
return cls.fromStream(io.StringIO(str(string)))
fromString = classmethod(fromString)
# @classmethod
def fromStream(cls, stream):
"""Return header object from the stream."""
stream.seek(0)
first_32 = stream.read(32)
if not isinstance(first_32, bytes):
_data = bytes(first_32, sys.getfilesystemencoding())
_data = first_32
(_cnt, _hdrLen, _recLen) = struct.unpack("<I2H", _data[4:12])
# reserved = _data[12:32]
_year = _data[1]
if _year < 80:
# dBase II started at 1980. It is quite unlikely
# that actual last update date is before that year.
_year += 2000
else:
_year += 1900
# create header object
_obj = cls(None, _hdrLen, _recLen, _cnt, _data[0],
(_year, _data[2], _data[3]))
# append field definitions
# position 0 is for the deletion flag
_pos = 1
_data = stream.read(1)
while _data != b'\r':
_data += stream.read(31)
_fld = fields.lookupFor(_data[11]).fromString(_data, _pos)
_obj._addField(_fld)
_pos = _fld.end
_data = stream.read(1)
return _obj
fromStream = classmethod(fromStream)
# properties
year = property(lambda self: self.lastUpdate.year)
month = property(lambda self: self.lastUpdate.month)
day = property(lambda self: self.lastUpdate.day)
def ignoreErrors(self, value):
"""Update `ignoreErrors` flag on self and all fields"""
self._ignore_errors = value = bool(value)
for _field in self.fields:
_field.ignoreErrors = value
ignoreErrors = property(
lambda self: self._ignore_errors,
ignoreErrors,
doc="""Error processing mode for DBF field value conversion
if set, failing field value conversion will return
``INVALID_VALUE`` instead of raising conversion error.
""")
# object representation
def __repr__(self):
_rv = """\
Version (signature): 0x%02x
Last update: %s
Header length: %d
Record length: %d
Record count: %d
FieldName Type Len Dec
"""
_rv = _rv % (self.signature, self.lastUpdate, self.headerLength,
self.recordLength, self.recordCount)
_rv += "\n".join(
["%10s %4s %3s %3s" % _fld.fieldInfo() for _fld in self.fields] # noqa: UP031
)
return _rv
# internal methods
def _addField(self, *defs):
"""Internal variant of the `addField` method.
This method doesn't set `self.changed` field to True.
Return value is a length of the appended records.
Note: this method doesn't modify ``recordLength`` and
``headerLength`` fields. Use `addField` instead of this
method if you don't exactly know what you're doing.
"""
# insure we have dbf.DbfFieldDef instances first (instantiation
# from the tuple could raise an error, in such a case I don't
# wanna add any of the definitions -- all will be ignored)
_defs = []
_recordLength = 0
for _def in defs:
if isinstance(_def, fields.DbfFieldDef):
_obj = _def
else:
(_name, _type, _len, _dec) = (tuple(_def) + (None,) * 4)[:4]
_cls = fields.lookupFor(_type)
_obj = _cls(_name, _len, _dec, ignoreErrors=self._ignore_errors)
_recordLength += _obj.length
_defs.append(_obj)
# and now extend field definitions and
# update record length
self.fields += _defs
return _recordLength
# interface methods
def addField(self, *defs):
"""Add field definition to the header.
Examples:
dbfh.addField(
("name", "C", 20),
dbf.DbfCharacterFieldDef("surname", 20),
dbf.DbfDateFieldDef("birthdate"),
("member", "L"),
)
dbfh.addField(("price", "N", 5, 2))
dbfh.addField(dbf.DbfNumericFieldDef("origprice", 5, 2))
"""
_oldLen = self.recordLength
self.recordLength += self._addField(*defs)
if not _oldLen:
self.recordLength += 1
# XXX: may be just use:
# self.recordeLength += self._addField(*defs) + bool(not _oldLen)
# recalculate headerLength
self.headerLength = 32 + (32 * len(self.fields)) + 1
self.changed = True
def write(self, stream):
"""Encode and write header to the stream."""
stream.seek(0)
stream.write(self.toString())
fields = [_fld.toString() for _fld in self.fields]
stream.write(''.join(fields).encode(sys.getfilesystemencoding()))
stream.write(b'\x0D') # cr at end of all header data
self.changed = False
def toString(self):
"""Returned 32 chars length string with encoded header."""
return struct.pack("<4BI2H",
self.signature,
self.year - 1900,
self.month,
self.day,
self.recordCount,
self.headerLength,
self.recordLength) + (b'\x00' * 20)
# TODO: figure out if bytes(utf-8) is correct here.
def setCurrentDate(self):
"""Update ``self.lastUpdate`` field with current date value."""
self.lastUpdate = datetime.date.today()
def __getitem__(self, item):
"""Return a field definition by numeric index or name string"""
if isinstance(item, str):
_name = item.upper()
for _field in self.fields:
if _field.name == _name:
return _field
else:
raise KeyError(item)
else:
# item must be field index
return self.fields[item]
# vim: et sts=4 sw=4 :
| DbfHeader |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 8267,
"end": 10762
} | class ____(nn.Module):
"""
Shared Expert MLP for shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
"""
Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally inefficient, especially when dealing with a large number of experts.
Args:
token_states (torch.Tensor): Input tensor of shape (num_tokens, in_features).
expert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features).
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
num_tokens = token_states.shape[0]
out_features = expert_weights.shape[-1]
output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)
cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)
# Insert zero at the beginning for offset index's convenience
zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)
cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))
for expert_num in range(expert_weights.shape[0]):
start = cumsum_num_tokens[expert_num]
end = cumsum_num_tokens[expert_num + 1]
tokens = token_states[start:end]
out = torch.matmul(tokens, expert_weights[expert_num])
output[start:end] = out
return output
| AriaSharedExpertsMLP |
python | getsentry__sentry | tests/sentry/utils/test_circuit_breaker2.py | {
"start": 4293,
"end": 8443
} | class ____(TestCase):
def setUp(self) -> None:
self.config = DEFAULT_CONFIG
self.breaker = MockCircuitBreaker("dogs_are_great", self.config)
# Clear all existing keys from redis
self.breaker.redis_pipeline.flushall()
self.breaker.redis_pipeline.execute()
def test_sets_default_values(self) -> None:
breaker = self.breaker
assert breaker.__dict__ == {
"key": "dogs_are_great",
"broken_state_key": "dogs_are_great.circuit_breaker.broken",
"recovery_state_key": "dogs_are_great.circuit_breaker.in_recovery",
"error_limit": 200,
"recovery_error_limit": 20,
"window": 3600,
"window_granularity": 180,
"broken_state_duration": 120,
"recovery_duration": 7200,
# These can't be compared with a simple equality check and therefore are tested
# individually below
"limiter": ANY,
"primary_quota": ANY,
"recovery_quota": ANY,
"redis_pipeline": ANY,
}
assert isinstance(breaker.limiter, RedisSlidingWindowRateLimiter)
assert isinstance(breaker.primary_quota, Quota)
assert isinstance(breaker.recovery_quota, Quota)
assert breaker.primary_quota.__dict__ == {
"window_seconds": 3600,
"granularity_seconds": 180,
"limit": 200,
"prefix_override": "dogs_are_great.circuit_breaker.ok",
}
assert breaker.recovery_quota.__dict__ == {
"window_seconds": 3600,
"granularity_seconds": 180,
"limit": 20,
"prefix_override": "dogs_are_great.circuit_breaker.recovery",
}
assert isinstance(breaker.redis_pipeline, Pipeline)
@patch("sentry.utils.circuit_breaker2.logger")
def test_fixes_too_loose_recovery_limit(self, mock_logger: MagicMock) -> None:
config: CircuitBreakerConfig = {
**DEFAULT_CONFIG,
"error_limit": 200,
"recovery_error_limit": 400,
}
for settings_debug_value, expected_log_function in [
(True, mock_logger.error),
(False, mock_logger.warning),
]:
settings.DEBUG = settings_debug_value
breaker = MockCircuitBreaker("dogs_are_great", config)
expected_log_function.assert_called_with(
"Circuit breaker '%s' has a recovery error limit (%d) greater than or equal"
+ " to its primary error limit (%d). Using the stricter error-limit-based"
+ " default (%d) instead.",
breaker.key,
400,
200,
20,
)
assert breaker.recovery_error_limit == 20
@patch("sentry.utils.circuit_breaker2.logger")
def test_fixes_mismatched_state_durations(self, mock_logger: MagicMock) -> None:
config: CircuitBreakerConfig = {
**DEFAULT_CONFIG,
"error_limit_window": 600,
"broken_state_duration": 100,
"recovery_duration": 200,
}
for settings_debug_value, expected_log_function in [
(True, mock_logger.error),
(False, mock_logger.warning),
]:
settings.DEBUG = settings_debug_value
breaker = MockCircuitBreaker("dogs_are_great", config)
expected_log_function.assert_called_with(
"Circuit breaker '%s' has BROKEN and RECOVERY state durations (%d and %d sec, respectively)"
+ " which together are less than the main error limit window (%d sec). This can lead to the"
+ " breaker getting tripped unexpectedly, until the original spike in errors clears the"
+ " main time window. Extending RECOVERY period to %d seconds, to give the primary quota time"
+ " to clear.",
breaker.key,
100,
200,
600,
500,
)
assert breaker.recovery_duration == 500
@freeze_time()
| CircuitBreakerTest |
python | python-pillow__Pillow | src/PIL/ImageFilter.py | {
"start": 3294,
"end": 3615
} | class ____(RankFilter):
"""
Create a median filter. Picks the median pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Median"
def __init__(self, size: int = 3) -> None:
self.size = size
self.rank = size * size // 2
| MedianFilter |
python | kamyu104__LeetCode-Solutions | Python/sort-even-and-odd-indices-independently.py | {
"start": 1872,
"end": 2720
} | class ____(object):
def sortEvenOdd(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def partition(index, nums):
for i in xrange(len(nums)):
j = i
while nums[i] >= 0:
j = index(j)
nums[i], nums[j] = nums[j], ~nums[i] # processed
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
partition(lambda i: i//2 if i%2 == 0 else (len(nums)+1)//2+i//2, nums)
nums[:(len(nums)+1)//2], nums[(len(nums)+1)//2:] = sorted(nums[:(len(nums)+1)//2]), sorted(nums[(len(nums)+1)//2:], reverse=True)
partition(lambda i: 2*i if i < (len(nums)+1)//2 else 1+2*(i-(len(nums)+1)//2), nums)
return nums
# Time: O(nlogn)
# Space: O(n)
# sort
| Solution2 |
python | tornadoweb__tornado | tornado/test/options_test.py | {
"start": 516,
"end": 11877
} | class ____(unittest.TestCase):
def test_parse_command_line(self):
options = OptionParser()
options.define("port", default=80)
options.parse_command_line(["main.py", "--port=443"])
self.assertEqual(options.port, 443)
def test_parse_config_file(self):
options = OptionParser()
options.define("port", default=80)
options.define("username", default="foo")
options.define("my_path")
config_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "options_test.cfg"
)
options.parse_config_file(config_path)
self.assertEqual(options.port, 443)
self.assertEqual(options.username, "李康")
self.assertEqual(options.my_path, config_path)
def test_parse_callbacks(self):
options = OptionParser()
self.called = False
def callback():
self.called = True
options.add_parse_callback(callback)
# non-final parse doesn't run callbacks
options.parse_command_line(["main.py"], final=False)
self.assertFalse(self.called)
# final parse does
options.parse_command_line(["main.py"])
self.assertTrue(self.called)
# callbacks can be run more than once on the same options
# object if there are multiple final parses
self.called = False
options.parse_command_line(["main.py"])
self.assertTrue(self.called)
def test_help(self):
options = OptionParser()
try:
orig_stderr = sys.stderr
sys.stderr = StringIO()
with self.assertRaises(SystemExit):
options.parse_command_line(["main.py", "--help"])
usage = sys.stderr.getvalue()
finally:
sys.stderr = orig_stderr
self.assertIn("Usage:", usage)
def test_subcommand(self):
base_options = OptionParser()
base_options.define("verbose", default=False)
sub_options = OptionParser()
sub_options.define("foo", type=str)
rest = base_options.parse_command_line(
["main.py", "--verbose", "subcommand", "--foo=bar"]
)
self.assertEqual(rest, ["subcommand", "--foo=bar"])
self.assertTrue(base_options.verbose)
rest2 = sub_options.parse_command_line(rest)
self.assertEqual(rest2, [])
self.assertEqual(sub_options.foo, "bar")
# the two option sets are distinct
try:
orig_stderr = sys.stderr
sys.stderr = StringIO()
with self.assertRaises(Error):
sub_options.parse_command_line(["subcommand", "--verbose"])
finally:
sys.stderr = orig_stderr
def test_setattr(self):
options = OptionParser()
options.define("foo", default=1, type=int)
options.foo = 2
self.assertEqual(options.foo, 2)
def test_setattr_type_check(self):
# setattr requires that options be the right type and doesn't
# parse from string formats.
options = OptionParser()
options.define("foo", default=1, type=int)
with self.assertRaises(Error):
options.foo = "2"
def test_setattr_with_callback(self):
values = [] # type: List[int]
options = OptionParser()
options.define("foo", default=1, type=int, callback=values.append)
options.foo = 2
self.assertEqual(values, [2])
def _sample_options(self):
options = OptionParser()
options.define("a", default=1)
options.define("b", default=2)
return options
def test_iter(self):
options = self._sample_options()
# OptionParsers always define 'help'.
self.assertEqual({"a", "b", "help"}, set(iter(options)))
def test_getitem(self):
options = self._sample_options()
self.assertEqual(1, options["a"])
def test_setitem(self):
options = OptionParser()
options.define("foo", default=1, type=int)
options["foo"] = 2
self.assertEqual(options["foo"], 2)
def test_items(self):
options = self._sample_options()
# OptionParsers always define 'help'.
expected = [("a", 1), ("b", 2), ("help", options.help)]
actual = sorted(options.items())
self.assertEqual(expected, actual)
def test_as_dict(self):
options = self._sample_options()
expected = {"a": 1, "b": 2, "help": options.help}
self.assertEqual(expected, options.as_dict())
def test_group_dict(self):
options = OptionParser()
options.define("a", default=1)
options.define("b", group="b_group", default=2)
frame = sys._getframe(0)
this_file = frame.f_code.co_filename
self.assertEqual({"b_group", "", this_file}, options.groups())
b_group_dict = options.group_dict("b_group")
self.assertEqual({"b": 2}, b_group_dict)
self.assertEqual({}, options.group_dict("nonexistent"))
def test_mock_patch(self):
# ensure that our setattr hooks don't interfere with mock.patch
options = OptionParser()
options.define("foo", default=1)
options.parse_command_line(["main.py", "--foo=2"])
self.assertEqual(options.foo, 2)
with mock.patch.object(options.mockable(), "foo", 3):
self.assertEqual(options.foo, 3)
self.assertEqual(options.foo, 2)
# Try nested patches mixed with explicit sets
with mock.patch.object(options.mockable(), "foo", 4):
self.assertEqual(options.foo, 4)
options.foo = 5
self.assertEqual(options.foo, 5)
with mock.patch.object(options.mockable(), "foo", 6):
self.assertEqual(options.foo, 6)
self.assertEqual(options.foo, 5)
self.assertEqual(options.foo, 2)
def _define_options(self):
options = OptionParser()
options.define("str", type=str)
options.define("basestring", type=basestring_type)
options.define("int", type=int)
options.define("float", type=float)
options.define("datetime", type=datetime.datetime)
options.define("timedelta", type=datetime.timedelta)
options.define("email", type=Email)
options.define("list-of-int", type=int, multiple=True)
options.define("list-of-str", type=str, multiple=True)
return options
def _check_options_values(self, options):
self.assertEqual(options.str, "asdf")
self.assertEqual(options.basestring, "qwer")
self.assertEqual(options.int, 42)
self.assertEqual(options.float, 1.5)
self.assertEqual(options.datetime, datetime.datetime(2013, 4, 28, 5, 16))
self.assertEqual(options.timedelta, datetime.timedelta(seconds=45))
self.assertEqual(options.email.value, "tornado@web.com")
self.assertTrue(isinstance(options.email, Email))
self.assertEqual(options.list_of_int, [1, 2, 3])
self.assertEqual(options.list_of_str, ["a", "b", "c"])
def test_types(self):
options = self._define_options()
options.parse_command_line(
[
"main.py",
"--str=asdf",
"--basestring=qwer",
"--int=42",
"--float=1.5",
"--datetime=2013-04-28 05:16",
"--timedelta=45s",
"--email=tornado@web.com",
"--list-of-int=1,2,3",
"--list-of-str=a,b,c",
]
)
self._check_options_values(options)
def test_types_with_conf_file(self):
for config_file_name in (
"options_test_types.cfg",
"options_test_types_str.cfg",
):
options = self._define_options()
options.parse_config_file(
os.path.join(os.path.dirname(__file__), config_file_name)
)
self._check_options_values(options)
def test_multiple_string(self):
options = OptionParser()
options.define("foo", type=str, multiple=True)
options.parse_command_line(["main.py", "--foo=a,b,c"])
self.assertEqual(options.foo, ["a", "b", "c"])
def test_multiple_int(self):
options = OptionParser()
options.define("foo", type=int, multiple=True)
options.parse_command_line(["main.py", "--foo=1,3,5:7"])
self.assertEqual(options.foo, [1, 3, 5, 6, 7])
def test_error_redefine(self):
options = OptionParser()
options.define("foo")
with self.assertRaises(Error) as cm:
options.define("foo")
self.assertRegex(str(cm.exception), "Option.*foo.*already defined")
def test_error_redefine_underscore(self):
# Ensure that the dash/underscore normalization doesn't
# interfere with the redefinition error.
tests = [
("foo-bar", "foo-bar"),
("foo_bar", "foo_bar"),
("foo-bar", "foo_bar"),
("foo_bar", "foo-bar"),
]
for a, b in tests:
with self.subTest(self, a=a, b=b):
options = OptionParser()
options.define(a)
with self.assertRaises(Error) as cm:
options.define(b)
self.assertRegex(str(cm.exception), "Option.*foo.bar.*already defined")
def test_dash_underscore_cli(self):
# Dashes and underscores should be interchangeable.
for defined_name in ["foo-bar", "foo_bar"]:
for flag in ["--foo-bar=a", "--foo_bar=a"]:
options = OptionParser()
options.define(defined_name)
options.parse_command_line(["main.py", flag])
# Attr-style access always uses underscores.
self.assertEqual(options.foo_bar, "a")
# Dict-style access allows both.
self.assertEqual(options["foo-bar"], "a")
self.assertEqual(options["foo_bar"], "a")
def test_dash_underscore_file(self):
# No matter how an option was defined, it can be set with underscores
# in a config file.
for defined_name in ["foo-bar", "foo_bar"]:
options = OptionParser()
options.define(defined_name)
options.parse_config_file(
os.path.join(os.path.dirname(__file__), "options_test.cfg")
)
self.assertEqual(options.foo_bar, "a")
def test_dash_underscore_introspection(self):
# Original names are preserved in introspection APIs.
options = OptionParser()
options.define("with-dash", group="g")
options.define("with_underscore", group="g")
all_options = ["help", "with-dash", "with_underscore"]
self.assertEqual(sorted(options), all_options)
self.assertEqual(sorted(k for (k, v) in options.items()), all_options)
self.assertEqual(sorted(options.as_dict().keys()), all_options)
self.assertEqual(
sorted(options.group_dict("g")), ["with-dash", "with_underscore"]
)
# --help shows CLI-style names with dashes.
buf = StringIO()
options.print_help(buf)
self.assertIn("--with-dash", buf.getvalue())
self.assertIn("--with-underscore", buf.getvalue())
| OptionsTest |
python | pypa__hatch | tests/project/test_config.py | {
"start": 5002,
"end": 111055
} | class ____:
def test_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs` must be a table"):
_ = ProjectConfig(isolation, {"envs": 9000}, PluginManager()).envs
def test_config_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo` must be a table"):
_ = ProjectConfig(isolation, {"envs": {"foo": 9000}}, PluginManager()).envs
def test_unknown_collector(self, isolation):
with pytest.raises(ValueError, match="Unknown environment collector: foo"):
_ = ProjectConfig(isolation, {"env": {"collectors": {"foo": {}}}}, PluginManager()).envs
def test_unknown_template(self, isolation):
with pytest.raises(
ValueError, match="Field `tool.hatch.envs.foo.template` refers to an unknown environment `bar`"
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"template": "bar"}}}, PluginManager()).envs
def test_default_undefined(self, isolation):
project_config = ProjectConfig(isolation, {}, PluginManager())
assert project_config.envs == project_config.envs == {"default": {"type": "virtual"}}
assert project_config.matrices == project_config.matrices == {}
def test_default_partially_defined(self, isolation):
env_config = {"default": {"option": True}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {"default": {"option": True, "type": "virtual"}}
def test_default_defined(self, isolation):
env_config = {"default": {"type": "foo"}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {"default": {"type": "foo"}}
def test_basic(self, isolation):
env_config = {"foo": {"option": True}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {"default": {"type": "virtual"}, "foo": {"option": True, "type": "virtual"}}
def test_basic_override(self, isolation):
env_config = {"foo": {"type": "baz"}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {"default": {"type": "virtual"}, "foo": {"type": "baz"}}
def test_multiple_inheritance(self, isolation):
env_config = {
"foo": {"option1": "foo"},
"bar": {"template": "foo", "option2": "bar"},
"baz": {"template": "bar", "option3": "baz"},
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "option1": "foo"},
"bar": {"type": "virtual", "option1": "foo", "option2": "bar"},
"baz": {"type": "virtual", "option1": "foo", "option2": "bar", "option3": "baz"},
}
def test_circular_inheritance(self, isolation):
with pytest.raises(
ValueError, match="Circular inheritance detected for field `tool.hatch.envs.*.template`: foo -> bar -> foo"
):
_ = ProjectConfig(
isolation, {"envs": {"foo": {"template": "bar"}, "bar": {"template": "foo"}}}, PluginManager()
).envs
def test_scripts_inheritance(self, isolation):
env_config = {
"default": {"scripts": {"cmd1": "bar", "cmd2": "baz"}},
"foo": {"scripts": {"cmd1": "foo"}},
"bar": {"template": "foo", "scripts": {"cmd3": "bar"}},
"baz": {},
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {
"default": {"type": "virtual", "scripts": {"cmd1": "bar", "cmd2": "baz"}},
"foo": {"type": "virtual", "scripts": {"cmd1": "foo", "cmd2": "baz"}},
"bar": {"type": "virtual", "scripts": {"cmd1": "foo", "cmd2": "baz", "cmd3": "bar"}},
"baz": {"type": "virtual", "scripts": {"cmd1": "bar", "cmd2": "baz"}},
}
def test_self_referential(self, isolation):
env_config = {"default": {"option1": "foo"}, "bar": {"template": "bar"}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {
"default": {"type": "virtual", "option1": "foo"},
"bar": {"type": "virtual"},
}
def test_detached(self, isolation):
env_config = {"default": {"option1": "foo"}, "bar": {"detached": True}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
assert project_config.envs == {
"default": {"type": "virtual", "option1": "foo"},
"bar": {"type": "virtual", "skip-install": True},
}
def test_matrices_not_array(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.matrix` must be an array"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": 9000}}}, PluginManager()).envs
def test_matrix_not_table(self, isolation):
with pytest.raises(TypeError, match="Entry #1 in field `tool.hatch.envs.foo.matrix` must be a table"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [9000]}}}, PluginManager()).envs
def test_matrix_empty(self, isolation):
with pytest.raises(ValueError, match="Matrix #1 in field `tool.hatch.envs.foo.matrix` cannot be empty"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{}]}}}, PluginManager()).envs
def test_matrix_variable_empty_string(self, isolation):
with pytest.raises(
ValueError, match="Variable #1 in matrix #1 in field `tool.hatch.envs.foo.matrix` cannot be an empty string"
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{"": []}]}}}, PluginManager()).envs
def test_matrix_variable_not_array(self, isolation):
with pytest.raises(
TypeError, match="Variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` must be an array"
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{"bar": 9000}]}}}, PluginManager()).envs
def test_matrix_variable_array_empty(self, isolation):
with pytest.raises(
ValueError, match="Variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` cannot be empty"
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{"bar": []}]}}}, PluginManager()).envs
def test_matrix_variable_entry_not_string(self, isolation):
with pytest.raises(
TypeError,
match="Value #1 of variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` must be a string",
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{"bar": [9000]}]}}}, PluginManager()).envs
def test_matrix_variable_entry_empty_string(self, isolation):
with pytest.raises(
ValueError,
match=(
"Value #1 of variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` "
"cannot be an empty string"
),
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{"bar": [""]}]}}}, PluginManager()).envs
def test_matrix_variable_entry_duplicate(self, isolation):
with pytest.raises(
ValueError,
match="Value #2 of variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` is a duplicate",
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix": [{"bar": ["1", "1"]}]}}}, PluginManager()).envs
def test_matrix_multiple_python_variables(self, isolation):
with pytest.raises(
ValueError,
match="Matrix #1 in field `tool.hatch.envs.foo.matrix` cannot contain both `py` and `python` variables",
):
_ = ProjectConfig(
isolation,
{"envs": {"foo": {"matrix": [{"py": ["39", "310"], "python": ["39", "311"]}]}}},
PluginManager(),
).envs
def test_matrix_name_format_not_string(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.matrix-name-format` must be a string"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix-name-format": 9000}}}, PluginManager()).envs
def test_matrix_name_format_invalid(self, isolation):
with pytest.raises(
ValueError,
match="Field `tool.hatch.envs.foo.matrix-name-format` must contain at least the `{value}` placeholder",
):
_ = ProjectConfig(isolation, {"envs": {"foo": {"matrix-name-format": "bar"}}}, PluginManager()).envs
def test_overrides_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides` must be a table"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"overrides": 9000}}}, PluginManager()).envs
def test_overrides_platform_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.platform` must be a table"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"overrides": {"platform": 9000}}}}, PluginManager()).envs
def test_overrides_env_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.env` must be a table"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"overrides": {"env": 9000}}}}, PluginManager()).envs
def test_overrides_matrix_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.matrix` must be a table"):
_ = ProjectConfig(
isolation,
{"envs": {"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": 9000}}}},
PluginManager(),
).envs
def test_overrides_name_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.name` must be a table"):
_ = ProjectConfig(
isolation,
{"envs": {"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"name": 9000}}}},
PluginManager(),
).envs
def test_overrides_platform_entry_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.platform.bar` must be a table"):
_ = ProjectConfig(
isolation, {"envs": {"foo": {"overrides": {"platform": {"bar": 9000}}}}}, PluginManager()
).envs
def test_overrides_env_entry_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.env.bar` must be a table"):
_ = ProjectConfig(isolation, {"envs": {"foo": {"overrides": {"env": {"bar": 9000}}}}}, PluginManager()).envs
def test_overrides_matrix_entry_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.matrix.bar` must be a table"):
_ = ProjectConfig(
isolation,
{"envs": {"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"bar": 9000}}}}},
PluginManager(),
).envs
def test_overrides_name_entry_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.envs.foo.overrides.name.bar` must be a table"):
_ = ProjectConfig(
isolation,
{"envs": {"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"name": {"bar": 9000}}}}},
PluginManager(),
).envs
def test_matrix_simple_no_python(self, isolation):
env_config = {"foo": {"option": True, "matrix": [{"version": ["9000", "3.14"]}]}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "option": True},
"foo.3.14": {"type": "virtual", "option": True},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_matrix_simple_no_python_custom_name_format(self, isolation):
env_config = {
"foo": {
"option": True,
"matrix-name-format": "{variable}_{value}",
"matrix": [{"version": ["9000", "3.14"]}],
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.version_9000": {"type": "virtual", "option": True},
"foo.version_3.14": {"type": "virtual", "option": True},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("indicator", ["py", "python"])
def test_matrix_simple_only_python(self, isolation, indicator):
env_config = {"foo": {"option": True, "matrix": [{indicator: ["39", "310"]}]}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.py39": {"type": "virtual", "option": True, "python": "39"},
"foo.py310": {"type": "virtual", "option": True, "python": "310"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("indicator", ["py", "python"])
def test_matrix_simple(self, isolation, indicator):
env_config = {"foo": {"option": True, "matrix": [{"version": ["9000", "3.14"], indicator: ["39", "310"]}]}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.py39-9000": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-3.14": {"type": "virtual", "option": True, "python": "39"},
"foo.py310-9000": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-3.14": {"type": "virtual", "option": True, "python": "310"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("indicator", ["py", "python"])
def test_matrix_simple_custom_name_format(self, isolation, indicator):
env_config = {
"foo": {
"option": True,
"matrix-name-format": "{variable}_{value}",
"matrix": [{"version": ["9000", "3.14"], indicator: ["39", "310"]}],
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.py39-version_9000": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-version_3.14": {"type": "virtual", "option": True, "python": "39"},
"foo.py310-version_9000": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-version_3.14": {"type": "virtual", "option": True, "python": "310"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_matrix_multiple_non_python(self, isolation):
env_config = {
"foo": {
"option": True,
"matrix": [{"version": ["9000", "3.14"], "py": ["39", "310"], "foo": ["baz", "bar"]}],
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.py39-9000-baz": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-9000-bar": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-3.14-baz": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-3.14-bar": {"type": "virtual", "option": True, "python": "39"},
"foo.py310-9000-baz": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-9000-bar": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-3.14-baz": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-3.14-bar": {"type": "virtual", "option": True, "python": "310"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_matrix_series(self, isolation):
env_config = {
"foo": {
"option": True,
"matrix": [
{"version": ["9000", "3.14"], "py": ["39", "310"], "foo": ["baz", "bar"]},
{"version": ["9000"], "py": ["310"], "baz": ["foo", "test"], "bar": ["foobar"]},
],
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.py39-9000-baz": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-9000-bar": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-3.14-baz": {"type": "virtual", "option": True, "python": "39"},
"foo.py39-3.14-bar": {"type": "virtual", "option": True, "python": "39"},
"foo.py310-9000-baz": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-9000-bar": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-3.14-baz": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-3.14-bar": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-9000-foo-foobar": {"type": "virtual", "option": True, "python": "310"},
"foo.py310-9000-test-foobar": {"type": "virtual", "option": True, "python": "310"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_matrices_not_inherited(self, isolation):
env_config = {
"foo": {"option1": True, "matrix": [{"py": ["39"]}]},
"bar": {"template": "foo", "option2": False},
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.py39": {"type": "virtual", "option1": True, "python": "39"},
"bar": {"type": "virtual", "option1": True, "option2": False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_matrix_default_naming(self, isolation):
env_config = {"default": {"option": True, "matrix": [{"version": ["9000", "3.14"]}]}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"9000": {"type": "virtual", "option": True},
"3.14": {"type": "virtual", "option": True},
}
assert project_config.envs == expected_envs
assert project_config.matrices["default"] == construct_matrix_data("default", env_config)
def test_matrix_pypy_naming(self, isolation):
env_config = {"foo": {"option": True, "matrix": [{"py": ["python3.9", "pypy3"]}]}}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.python3.9": {"type": "virtual", "option": True, "python": "python3.9"},
"foo.pypy3": {"type": "virtual", "option": True, "python": "pypy3"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a string or an array",
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string or an inline table"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_no_key(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must have an option named `key`"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_key_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `key` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"key": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_key_empty_string(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f"Option `key` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"cannot be an empty string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"key": ""}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"key": "foo", "value": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {
"matrix": {"version": {option: [{"key": "foo", "value": "bar", "if": 9000}]}}
},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_invalid_type(self, isolation, option):
with pytest.raises(
TypeError, match=f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be an array"
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must have an option named `value`"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_value_empty_string(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f"Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"cannot be an empty string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": ""}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": "foo", "if": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string or an inline table"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string, inline table, or an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must have an option named `value`",
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: {}}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_table_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=f"Option `value` in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a string",
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: {"value": 9000}}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string or an inline table"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must have an option named `value`"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": "foo", "if": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a boolean, inline table, or an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must have an option named `value`",
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: {}}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_value_not_boolean(self, isolation, option):
with pytest.raises(
TypeError,
match=f"Option `value` in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a boolean",
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: {"value": 9000}}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a boolean or an inline table"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f"Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must have an option named `value`"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_value_not_boolean(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be a boolean"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "if": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_platform_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `platform` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "platform": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_platform_item_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Item #1 in option `platform` in entry #1 in field "
f"`tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "platform": [9000]}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_env_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Option `env` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` "
f"must be an array"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "env": 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_env_item_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f"Item #1 in option `env` in entry #1 in field "
f"`tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a string"
),
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "env": [9000]}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_string_with_value(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: "FOO=ok"}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "ok"}},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_string_without_value(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: "FOO"}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "9000"}},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_string_override(self, isolation, option):
env_config = {
"foo": {
option: {"TEST": "baz"},
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: "TEST"}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"TEST": "9000"}},
"foo.bar": {"type": "virtual", option: {"TEST": "baz"}},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_string_with_value(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: ["FOO=ok"]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "ok"}},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_string_without_value(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: ["FOO"]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "9000"}},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_string_override(self, isolation, option):
env_config = {
"foo": {
option: {"TEST": "baz"},
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: ["TEST"]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"TEST": "9000"}},
"foo.bar": {"type": "virtual", option: {"TEST": "baz"}},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_key_with_value(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"key": "FOO", "value": "ok"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "ok"}},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_key_without_value(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"key": "FOO"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "9000"}},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_override(self, isolation, option):
env_config = {
"foo": {
option: {"TEST": "baz"},
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"key": "TEST"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"TEST": "9000"}},
"foo.bar": {"type": "virtual", option: {"TEST": "baz"}},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_conditional(self, isolation, option):
env_config = {
"foo": {
option: {"TEST": "baz"},
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"key": "TEST", "if": ["42"]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"TEST": "baz"}},
"foo.42": {"type": "virtual", option: {"TEST": "42"}},
"foo.bar": {"type": "virtual", option: {"TEST": "baz"}},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", MAPPING_OPTIONS)
def test_overrides_matrix_mapping_overwrite(self, isolation, option):
env_config = {
"foo": {
option: {"TEST": "baz"},
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {f"set-{option}": ["FOO=bar", {"key": "BAZ"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"FOO": "bar", "BAZ": "9000"}},
"foo.bar": {"type": "virtual", option: {"TEST": "baz"}},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_string(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: ["run foo"]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run foo"]},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_string_existing_append(self, isolation, option):
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: ["run foo"]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "run foo"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run foo"]},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_existing_append(self, isolation, option):
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "run foo"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional(self, isolation, option):
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "run foo", "if": ["42"]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_platform(self, isolation, option, current_platform):
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {
"version": {option: [{"value": "run foo", "if": ["42"], "platform": [current_platform]}]}
},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_wrong_platform(self, isolation, option):
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {"version": {option: [{"value": "run foo", "if": ["42"], "platform": ["bar"]}]}},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_env_var_match(self, isolation, option):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {"version": {option: [{"value": "run foo", "if": ["42"], "env": [f"{env_var}=bar"]}]}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
with EnvVars({env_var: "bar"}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_env_var_match_empty_string(self, isolation, option):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {"version": {option: [{"value": "run foo", "if": ["42"], "env": [f"{env_var}="]}]}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
with EnvVars({env_var: ""}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_env_var_present(self, isolation, option):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "run foo", "if": ["42"], "env": [env_var]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz", "run foo"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
with EnvVars({env_var: "any"}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_env_var_no_match(self, isolation, option):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {"version": {option: [{"value": "run foo", "if": ["42"], "env": [f"{env_var}=bar"]}]}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
with EnvVars({env_var: "baz"}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional_with_env_var_missing(self, isolation, option):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {"version": {option: [{"value": "run foo", "if": ["42"], "env": [f"{env_var}=bar"]}]}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run baz"]},
"foo.42": {"type": "virtual", option: ["run baz"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_overrides_matrix_set_with_no_type_information(self, isolation):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"matrix": {"version": {"bar": {"value": ["baz"], "if": ["42"], "env": [f"{env_var}=bar"]}}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual"},
"foo.42": {"type": "virtual", "bar": ["baz"]},
"foo.bar": {"type": "virtual"},
}
with EnvVars({env_var: "bar"}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
def test_overrides_matrix_set_with_no_type_information_not_table(self, isolation):
project_config = ProjectConfig(
isolation,
{
"envs": {
"foo": {
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {"bar": 9000}}},
}
}
},
PluginManager(),
)
_ = project_config.envs
with pytest.raises(
ValueError,
match=(
"Untyped option `tool.hatch.envs.foo.9000.overrides.matrix.version.bar` "
"must be defined as a table with a `value` key"
),
):
project_config.finalize_env_overrides({})
@pytest.mark.parametrize("option", ARRAY_OPTIONS)
def test_overrides_matrix_array_overwrite(self, isolation, option):
env_config = {
"foo": {
option: ["run baz"],
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {f"set-{option}": ["run foo", {"value": "run bar"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: ["run foo", "run bar"]},
"foo.bar": {"type": "virtual", option: ["run baz"]},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_string_create(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: "baz"}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_string_overwrite(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: "baz"}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_table_create(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"value": "baz"}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_table_override(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"value": "baz"}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_table_conditional(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"value": "baz", "if": ["42"]}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "test"},
"foo.42": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_create(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "baz"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_override(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "baz"}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_conditional(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "baz", "if": ["42"]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "test"},
"foo.42": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_conditional_eager_string(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: ["baz", {"value": "foo", "if": ["42"]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "baz"},
"foo.42": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", STRING_OPTIONS)
def test_overrides_matrix_string_array_table_conditional_eager_table(self, isolation, option):
env_config = {
"foo": {
option: "test",
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": "baz", "if": ["42"]}, "foo"]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: "foo"},
"foo.42": {"type": "virtual", option: "baz"},
"foo.bar": {"type": "virtual", option: "test"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_boolean_create(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: True}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_boolean_overwrite(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: True}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_create(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"value": True}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_override(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"value": True}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_conditional(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"value": True, "if": ["42"]}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: False},
"foo.42": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_create(self, isolation, option):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": True}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_override(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": True}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_conditional(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "if": ["42"]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: False},
"foo.42": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_conditional_eager_boolean(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [True, {"value": False, "if": ["42"]}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: True},
"foo.42": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_conditional_eager_table(self, isolation, option):
env_config = {
"foo": {
option: False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: [{"value": True, "if": ["42"]}, False]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: False},
"foo.42": {"type": "virtual", option: True},
"foo.bar": {"type": "virtual", option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
    # Type coverage is asserted via the matrix variable overrides above; for the remaining
    # override sources we exercise only a single type.
def test_overrides_platform_boolean_boolean_create(self, isolation, current_platform):
env_config = {
"foo": {
"overrides": {"platform": {"bar": {"dependencies": ["baz"]}, current_platform: {"skip-install": True}}}
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": True},
}
assert project_config.envs == expected_envs
def test_overrides_platform_boolean_boolean_overwrite(self, isolation, current_platform):
env_config = {
"foo": {
"skip-install": True,
"overrides": {
"platform": {"bar": {"dependencies": ["baz"]}, current_platform: {"skip-install": False}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": False},
}
assert project_config.envs == expected_envs
def test_overrides_platform_boolean_table_create(self, isolation, current_platform):
env_config = {
"foo": {
"overrides": {
"platform": {
"bar": {"dependencies": ["baz"]},
current_platform: {"skip-install": [{"value": True}]},
}
}
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": True},
}
assert project_config.envs == expected_envs
def test_overrides_platform_boolean_table_overwrite(self, isolation, current_platform):
env_config = {
"foo": {
"skip-install": True,
"overrides": {
"platform": {
"bar": {"dependencies": ["baz"]},
current_platform: {"skip-install": [{"value": False}]},
}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": False},
}
assert project_config.envs == expected_envs
def test_overrides_env_boolean_boolean_create(self, isolation):
env_var_exists = "OVERRIDES_ENV_FOO"
env_var_missing = "OVERRIDES_ENV_BAR"
env_config = {
"foo": {
"overrides": {
"env": {env_var_missing: {"dependencies": ["baz"]}, env_var_exists: {"skip-install": True}}
}
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": True},
}
with EnvVars({env_var_exists: "any"}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_boolean_overwrite(self, isolation):
env_var_exists = "OVERRIDES_ENV_FOO"
env_var_missing = "OVERRIDES_ENV_BAR"
env_config = {
"foo": {
"skip-install": True,
"overrides": {
"env": {env_var_missing: {"dependencies": ["baz"]}, env_var_exists: {"skip-install": False}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": False},
}
with EnvVars({env_var_exists: "any"}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_table_create(self, isolation):
env_var_exists = "OVERRIDES_ENV_FOO"
env_var_missing = "OVERRIDES_ENV_BAR"
env_config = {
"foo": {
"overrides": {
"env": {
env_var_missing: {"dependencies": ["baz"]},
env_var_exists: {"skip-install": [{"value": True}]},
}
}
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": True},
}
with EnvVars({env_var_exists: "any"}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_table_overwrite(self, isolation):
env_var_exists = "OVERRIDES_ENV_FOO"
env_var_missing = "OVERRIDES_ENV_BAR"
env_config = {
"foo": {
"skip-install": True,
"overrides": {
"env": {
env_var_missing: {"dependencies": ["baz"]},
env_var_exists: {"skip-install": [{"value": False}]},
}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": False},
}
with EnvVars({env_var_exists: "any"}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_conditional(self, isolation):
env_var_exists = "OVERRIDES_ENV_FOO"
env_var_missing = "OVERRIDES_ENV_BAR"
env_config = {
"foo": {
"overrides": {
"env": {
env_var_missing: {"dependencies": ["baz"]},
env_var_exists: {"skip-install": [{"value": True, "if": ["foo"]}]},
}
}
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": True},
}
with EnvVars({env_var_exists: "foo"}):
assert project_config.envs == expected_envs
def test_overrides_name_boolean_boolean_create(self, isolation):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"name": {"bar$": {"skip-install": True}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual"},
"foo.bar": {"type": "virtual", "skip-install": True},
}
assert project_config.envs == expected_envs
def test_overrides_name_boolean_boolean_overwrite(self, isolation):
env_config = {
"foo": {
"skip-install": True,
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"name": {"bar$": {"skip-install": False}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "skip-install": True},
"foo.bar": {"type": "virtual", "skip-install": False},
}
assert project_config.envs == expected_envs
def test_overrides_name_boolean_table_create(self, isolation):
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"name": {"bar$": {"skip-install": [{"value": True}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual"},
"foo.bar": {"type": "virtual", "skip-install": True},
}
assert project_config.envs == expected_envs
def test_overrides_name_boolean_table_overwrite(self, isolation):
env_config = {
"foo": {
"skip-install": True,
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"name": {"bar$": {"skip-install": [{"value": False}]}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "skip-install": True},
"foo.bar": {"type": "virtual", "skip-install": False},
}
assert project_config.envs == expected_envs
# Tests for source precedence
def test_overrides_name_precedence_over_matrix(self, isolation):
env_config = {
"foo": {
"skip-install": False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"name": {"42$": {"skip-install": False}},
"matrix": {"version": {"skip-install": [{"value": True, "if": ["42"]}]}},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "skip-install": False},
"foo.42": {"type": "virtual", "skip-install": False},
"foo.bar": {"type": "virtual", "skip-install": False},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config, {"skip-install": False})
def test_overrides_matrix_precedence_over_platform(self, isolation, current_platform):
env_config = {
"foo": {
"skip-install": False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"platform": {current_platform: {"skip-install": True}},
"matrix": {"version": {"skip-install": [{"value": False, "if": ["42"]}]}},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "skip-install": True},
"foo.42": {"type": "virtual", "skip-install": False},
"foo.bar": {"type": "virtual", "skip-install": True},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config, {"skip-install": True})
def test_overrides_matrix_precedence_over_env(self, isolation):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
"skip-install": False,
"matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
"overrides": {
"env": {env_var: {"skip-install": True}},
"matrix": {"version": {"skip-install": [{"value": False, "if": ["42"]}]}},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "skip-install": True},
"foo.42": {"type": "virtual", "skip-install": False},
"foo.bar": {"type": "virtual", "skip-install": True},
}
with EnvVars({env_var: "any"}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config, {"skip-install": True})
def test_overrides_env_precedence_over_platform(self, isolation, current_platform):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
"overrides": {
"platform": {current_platform: {"skip-install": True}},
"env": {env_var: {"skip-install": [{"value": False, "if": ["foo"]}]}},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo": {"type": "virtual", "skip-install": False},
}
with EnvVars({env_var: "foo"}):
assert project_config.envs == expected_envs
# Test for options defined by environment plugins
def test_overrides_for_environment_plugins(self, isolation, current_platform):
env_var = "OVERRIDES_ENV_FOO"
env_config = {
"foo": {
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {
"platform": {current_platform: {"foo": True}},
"env": {env_var: {"bar": [{"value": "foobar", "if": ["foo"]}]}},
"matrix": {"version": {"baz": "BAR=ok"}},
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual"},
"foo.bar": {"type": "virtual"},
}
with EnvVars({env_var: "foo"}):
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
project_config.finalize_env_overrides({"foo": bool, "bar": str, "baz": dict})
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", "foo": True, "bar": "foobar", "baz": {"BAR": "ok"}},
"foo.bar": {"type": "virtual", "foo": True, "bar": "foobar"},
}
assert project_config.envs == expected_envs
assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
# Test environment collectors
    def test_environment_collector_finalize_config(self, helpers, temp_dir):
        """A custom collector's ``finalize_config`` hook can mutate raw env config
        (here: the default env type) before overrides are resolved, so the new
        type propagates to every generated matrix environment except where an
        override applies."""
        # Write the custom collector plugin script that the `custom` collector loads.
        file_path = temp_dir / DEFAULT_CUSTOM_SCRIPT
        file_path.write_text(
            helpers.dedent(
                """
                from hatch.env.collectors.plugin.interface import EnvironmentCollectorInterface
                class CustomHook(EnvironmentCollectorInterface):
                    def finalize_config(self, config):
                        config['default']['type'] = 'foo'
                """
            )
        )
        env_config = {
            "foo": {
                "matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
                "overrides": {"matrix": {"version": {"type": {"value": "baz", "if": ["42"]}}}},
            }
        }
        project_config = ProjectConfig(
            temp_dir, {"envs": env_config, "env": {"collectors": {"custom": {}}}}, PluginManager()
        )
        # The hook's type change applies everywhere; the matrix override still wins for foo.42.
        expected_envs = {
            "default": {"type": "foo"},
            "foo.9000": {"type": "foo"},
            "foo.42": {"type": "baz"},
            "foo.bar": {"type": "foo"},
        }
        # Resolution happens in the project directory so the custom script is found.
        with temp_dir.as_cwd():
            assert project_config.envs == expected_envs
            assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config, {"type": "foo"})
    def test_environment_collector_finalize_environments(self, helpers, temp_dir):
        """A custom collector's ``finalize_environments`` hook runs after override
        resolution and can mutate individual fully-expanded environments (here:
        replacing the already-overridden type of ``foo.42``)."""
        # Write the custom collector plugin script that the `custom` collector loads.
        file_path = temp_dir / DEFAULT_CUSTOM_SCRIPT
        file_path.write_text(
            helpers.dedent(
                """
                from hatch.env.collectors.plugin.interface import EnvironmentCollectorInterface
                class CustomHook(EnvironmentCollectorInterface):
                    def finalize_environments(self, config):
                        config['foo.42']['type'] = 'foo'
                """
            )
        )
        env_config = {
            "foo": {
                "matrix": [{"version": ["9000", "42"]}, {"feature": ["bar"]}],
                "overrides": {"matrix": {"version": {"type": {"value": "baz", "if": ["42"]}}}},
            }
        }
        project_config = ProjectConfig(
            temp_dir, {"envs": env_config, "env": {"collectors": {"custom": {}}}}, PluginManager()
        )
        # The hook overwrites foo.42 after the matrix override had set it to "baz".
        expected_envs = {
            "default": {"type": "virtual"},
            "foo.9000": {"type": "virtual"},
            "foo.42": {"type": "foo"},
            "foo.bar": {"type": "virtual"},
        }
        # Resolution happens in the project directory so the custom script is found.
        with temp_dir.as_cwd():
            assert project_config.envs == expected_envs
            assert project_config.matrices["foo"] == construct_matrix_data("foo", env_config)
@pytest.mark.parametrize("option", WORKSPACE_OPTIONS)
def test_overrides_matrix_workspace_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=f"Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a table",
):
_ = ProjectConfig(
isolation,
{
"envs": {
"foo": {"matrix": [{"version": ["9000"]}], "overrides": {"matrix": {"version": {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize("option", WORKSPACE_OPTIONS)
def test_overrides_matrix_workspace_members_append(self, isolation, option):
env_config = {
"foo": {
option: {"members": ["packages/core"]},
"matrix": [{"version": ["9000"]}, {"feature": ["bar"]}],
"overrides": {"matrix": {"version": {option: {"members": ["packages/extra"]}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"members": ["packages/core", "packages/extra"]}},
"foo.bar": {"type": "virtual", option: {"members": ["packages/core"]}},
}
assert project_config.envs == expected_envs
@pytest.mark.parametrize("option", WORKSPACE_OPTIONS)
def test_overrides_matrix_workspace_members_conditional(self, isolation, option):
env_config = {
"foo": {
option: {"members": ["packages/core"]},
"matrix": [{"version": ["9000", "42"]}],
"overrides": {
"matrix": {"version": {option: {"members": [{"value": "packages/special", "if": ["42"]}]}}}
},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"members": ["packages/core"]}},
"foo.42": {"type": "virtual", option: {"members": ["packages/core", "packages/special"]}},
}
assert project_config.envs == expected_envs
@pytest.mark.parametrize("option", WORKSPACE_OPTIONS)
def test_overrides_matrix_workspace_parallel(self, isolation, option):
env_config = {
"foo": {
option: {"members": ["packages/*"], "parallel": True},
"matrix": [{"version": ["9000", "42"]}],
"overrides": {"matrix": {"version": {option: {"parallel": {"value": False, "if": ["42"]}}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"members": ["packages/*"], "parallel": True}},
"foo.42": {"type": "virtual", option: {"members": ["packages/*"], "parallel": False}},
}
assert project_config.envs == expected_envs
@pytest.mark.parametrize("option", WORKSPACE_OPTIONS)
def test_overrides_matrix_workspace_overwrite(self, isolation, option):
env_config = {
"foo": {
option: {"members": ["packages/core"], "parallel": True},
"matrix": [{"version": ["9000"]}],
"overrides": {"matrix": {"version": {f"set-{option}": {"members": ["packages/new"]}}}},
}
}
project_config = ProjectConfig(isolation, {"envs": env_config}, PluginManager())
expected_envs = {
"default": {"type": "virtual"},
"foo.9000": {"type": "virtual", option: {"members": ["packages/new"]}},
}
assert project_config.envs == expected_envs
| TestEnvs |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/polymorphic_function_test.py | {
"start": 6822,
"end": 151812
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
cpus = config.list_physical_devices('CPU')
# Set 4 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testBasic(self):
matmul = polymorphic_function.function(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
def testPythonFunctionNotCallable(self):
with self.assertRaisesRegex(TypeError, 'is not a callable object'):
polymorphic_function.function(1)
def testOnExitCallback(self):
values = []
def append_1():
values.append(1)
def append_2():
values.append(2)
def g(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_1)
self.assertEqual(old_values, values)
return x + 1
tf_g = polymorphic_function.function(g)
def f(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_2)
self.assertEqual(old_values, values)
return tf_g(x)
tf_f = polymorphic_function.function(f)
self.assertEmpty(values)
tf_f(constant_op.constant(1.0))
self.assertEqual(values, [1, 2]) # Once for g, once for f.
tf_f(constant_op.constant([1.0])) # force a retrace
self.assertEqual(values, [1, 2, 1, 2]) # And again.
def testCannotAddExitCallbackWhenNotInFunctionScope(self):
with self.assertRaisesRegex(RuntimeError, 'when not building a function.'):
ops.add_exit_callback_to_default_func_graph(lambda: None)
def testVariable(self):
v1 = variables.Variable(1.0)
add = polymorphic_function.function(lambda x, v: x + v1 + v)
v2 = variables.Variable(1.0)
x = constant_op.constant(1.0)
r = add(x, v2)
self.assertEqual(3.0, self.evaluate(r))
def testVariableOnly(self):
v = variables.Variable(1.0)
add = polymorphic_function.function(lambda x: x.assign_add(1.0))
r1 = add(v)
self.assertEqual(2.0, self.evaluate(r1))
c = constant_op.constant(1.0)
with self.assertRaisesRegex(AttributeError, 'no attribute'):
add(c)
def testVariableMultiFunction(self):
@polymorphic_function.function
def second(dup_var, dup_var_2, some_const):
return dup_var + dup_var_2 + some_const
@polymorphic_function.function
def first(dup_var, some_const):
return second(dup_var, dup_var, some_const)
my_const = constant_op.constant(1)
my_var = variables.Variable(2, dtype=dtypes.int32)
self.assertEqual(second(my_var, my_var, my_const).numpy(), 5)
self.assertEqual(first(my_var, my_const).numpy(), 5)
@test_util.disable_tfrt('Packed tensor is not supported in tfrt yet.')
def testPackedVariable(self):
with ops.device('/cpu:0'):
v0_0 = resource_variable_ops.ResourceVariable(1.0)
with ops.device('/cpu:1'):
v0_1 = resource_variable_ops.ResourceVariable(2.0)
v1_0 = resource_variable_ops.ResourceVariable(3.0)
with ops.device('/cpu:2'):
v1_1 = resource_variable_ops.ResourceVariable(4.0)
packed_var_0 = ops.pack_eager_tensors([v0_0.handle, v0_1.handle])
packed_var_1 = ops.pack_eager_tensors([v1_0.handle, v1_1.handle])
# TODO(b/145922293): use ResourceVariable.assign_add and
# ResourceVariable.read_value directly once we support packing multiple
# ResourceVariable into one ResourceVariable.
@polymorphic_function.function
def read_var():
resource_variable_ops.assign_add_variable_op(packed_var_0,
constant_op.constant(5.0))
resource_variable_ops.assign_add_variable_op(packed_var_1,
constant_op.constant(6.0))
with ops.device('/cpu:0'):
read0 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
with ops.device('/cpu:1'):
read1 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
read2 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
with ops.device('/cpu:2'):
read3 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
return read0, read1, read2, read3
arg_attrs = read_var.get_concrete_function().function_def.arg_attr
self.assertLen(arg_attrs, 2)
self.assertEqual(arg_attrs[0].attr['_composite_device'].s,
compat.as_bytes(packed_var_0.device))
self.assertEqual(arg_attrs[1].attr['_composite_device'].s,
compat.as_bytes(packed_var_1.device))
self.assertAllEqual(read_var(), (1 + 5, 2 + 5, 3 + 6, 4 + 6))
def testImplementsAttributeBasic(self):
v = polymorphic_function.function(
experimental_implements='func')(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(attributes_lib.IMPLEMENTS,
f.attr, f)
else:
present += 1
self.assertEqual(
f.attr[attributes_lib.IMPLEMENTS].s,
'func'.encode('ascii'), f)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testImplementsAttributeAssertsOnSideInput(self):
with context.graph_mode(), self.cached_session():
z = array_ops.zeros(0)
v = polymorphic_function.function(
experimental_implements='func')(lambda x, y: x + y + z)
a = array_ops.ones((1,))
b = array_ops.ones((1,))
with self.assertRaisesRegex(AssertionError,
'variables are always captured'):
v(a, b)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertEmpty(functions)
def testImplementsAttributeWorksWithGradientTape(self):
add = lambda x, y: x + y**2
add = polymorphic_function.function(experimental_implements='MyFunc')(add)
x = variables.Variable(3.0)
y = variables.Variable(2.0)
with backprop.GradientTape() as tape:
g = add(x, y)
dg_dy, dg_dx = tape.gradient(g, [y, x])
self.assertEqual(dg_dy.numpy(), 4.0)
self.assertEqual(dg_dx.numpy(), 1.0)
def testImplementsAttributeWorksOnVariables(self):
with context.graph_mode(), self.cached_session():
v = polymorphic_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable((1.0,))
b = variables.Variable((1.0,))
r1 = v(a, b)
_ = v(a, a)
functions = ops.get_default_graph().as_graph_def().library.function
# Verify that we created only one function
self.assertLen(functions, 1)
# Verify that self.evaluate() reads the current values.
a.initializer.run()
b.initializer.run()
self.assertEqual(self.evaluate(r1), 2)
self.evaluate(a.assign_add([1]))
self.assertEqual(self.evaluate(r1), 3)
def testImplementsAttributeWorksOnConstants(self):
with context.graph_mode(), self.cached_session():
v = polymorphic_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, 2.)
r2 = v(2., a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 1)
self.assertLen(functions[0].signature.input_arg, 2)
# Verify that self.evaluate() reads the current values.
a.initializer.run()
self.assertEqual(self.evaluate(r1), 3)
self.assertEqual(self.evaluate(r2), 3)
def testImplementsAttributeSpecializes(self):
with context.graph_mode(), self.cached_session():
v = polymorphic_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, [2.])
r2 = v([2., 2], a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 2)
# Ensure that all parameters are still there and haven't been inlined!
self.assertLen(functions[0].signature.input_arg, 2)
self.assertLen(functions[1].signature.input_arg, 2)
# Verify that self.evaluate() reads the current values.
a.initializer.run()
numpy.testing.assert_equal(self.evaluate(r1), [3.])
numpy.testing.assert_equal(self.evaluate(r2), [3., 3.])
def testImplementsWorksWithTensorSpec(self):
v = polymorphic_function.function(
experimental_implements='func')(lambda x, y: x + y)
v = v.get_concrete_function(
tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32))
x = v(1., 2.)
self.assertEqual(x.numpy(), 3.)
def testImplementsAttributeAsNameAttrList(self):
implements_attr = (
'name: "embedding_matmul" attr { key: "key1" value { i: 2 } '
'} attr { key: "key2" value { b: false } }')
v = polymorphic_function.function(
experimental_implements=implements_attr)(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(attributes_lib.IMPLEMENTS,
f.attr, f)
else:
present += 1
attr_value = f.attr[attributes_lib.IMPLEMENTS]
self.assertIsNotNone(attr_value.func, f)
self.assertEqual(attr_value.func.name, 'embedding_matmul')
name_attrs = attr_value.func.attr
self.assertLen(name_attrs, 2)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testDisableACDAttribute(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x, y):
nonlocal v
t = v.read_value()
v.assign_add(x + y)
return t
with_acd = polymorphic_function.function(foo)
without_acd = polymorphic_function.function(
foo, experimental_attributes={'_disable_acd': True}
)
with_acd_control_outputs = with_acd.get_concrete_function(
tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32),
).graph.control_outputs
without_acd_control_outputs = without_acd.get_concrete_function(
tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32),
).graph.control_outputs
self.assertLen(with_acd_control_outputs, 2)
self.assertEmpty(without_acd_control_outputs)
def testReduceTracingWithNestedTFFunction(self):
v = resource_variable_ops.ResourceVariable([1., 2.])
@polymorphic_function.function(reduce_retracing=True)
def inner_test_fn(x):
x.assign_add([2., 2.])
return x
@polymorphic_function.function(reduce_retracing=True)
def test_fn(x):
x.assign_add([1., 1.])
return inner_test_fn(x)
with backprop.GradientTape() as tape:
y = test_fn(v)
grad = tape.gradient(y, v)
self.assertAllEqual(y, [4., 5.])
self.assertAllEqual(grad, [1., 1.])
with backprop.GradientTape() as tape:
y = test_fn(v)
grad = tape.gradient(y, v)
self.assertAllEqual(y, [7., 8.])
self.assertAllEqual(grad, [1., 1.])
def testInputShapeRelaxationOnInstanceMethod(self):
# Test that reduce_retracing is passed during
# instance method bounding.
unknown_dim = [False]
class Foo:
@polymorphic_function.function(reduce_retracing=True)
def func(self, a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
foo = Foo()
foo.func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0]))
self.assertTrue(unknown_dim[0])
foo.func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
def testInputShapeFunctionRelaxationWithRaggedTensors(self):
traced_type_spec = [None]
@polymorphic_function.function(reduce_retracing=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
check_trace( # Initial call gets traced.
ragged_factory_ops.constant([[1], [2, 3, 4]]),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32))
check_trace( # Input TypeSpec is the same -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4]]), None)
check_trace( # Even if component tensor shapes change -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4, 5, 6]]), None)
check_trace( # Different TypeSpec shape (nrows): relax & retrace
ragged_factory_ops.constant([[1], [2], [3]]),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32))
check_trace( # Different nrows again: relax & retrace
ragged_factory_ops.constant([[1], [2], [3], [4]]), None)
check_trace( # Different nrows yet again: not retrace
ragged_factory_ops.constant([[1]]), None)
check_trace( # Different ragged_rank: retrace
ragged_factory_ops.constant([[[1]]]),
ragged_tensor.RaggedTensorSpec([1, None, None], dtypes.int32))
check_trace( # Different ragged_rank again: retrace & relax
ragged_factory_ops.constant([[[1]], [[2]]]),
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32))
def testInputShapeFunctionRelaxationWithStructuredTensors(self):
traced_type_spec = [None]
@polymorphic_function.function(reduce_retracing=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
# If we have TypeSpecs that differ in ways other than just their shape,
# then retrace each time.
check_trace(
structured_tensor.StructuredTensor.from_pyval({'a': [1]}),
structured_tensor.StructuredTensor.Spec._from_fields_and_rank(
fields={'a': tensor_lib.TensorSpec((1,), dtypes.int32)}, rank=0))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'b': [1]}),
structured_tensor.StructuredTensor.Spec._from_fields_and_rank(
fields={'b': tensor_lib.TensorSpec((1,), dtypes.int32)}, rank=0))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'c': [1]}),
structured_tensor.StructuredTensor.Spec._from_fields_and_rank(
fields={'c': tensor_lib.TensorSpec((1,), dtypes.int32)}, rank=0))
# But if we call again with only shape different, then do relax:
check_trace( # relax & retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2]}),
structured_tensor.StructuredTensor.Spec._from_fields_and_rank(
fields={'a': tensor_lib.TensorSpec((None,), dtypes.int32)},
rank=0))
check_trace( # use relaxed graph
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3]}), None)
check_trace( # use relaxed graph
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3, 4]}),
None)
def testInputShapeFunctionRelaxationWithDatasetIterators(self):
# For dataset iterators, the TypeSpec includes type information that's
# not derivable from the component tensors. Make sure that the TypeSpec
# shapes get relaxed as appropriate.
traced_type_spec = [None]
@polymorphic_function.function(reduce_retracing=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
ds_1_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([1, 2]))
ds_2_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 2]))
ds_3_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([3, 2]))
ds_4_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([4, 2]))
ds_2_1 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 1]))
check_trace( # shape=[1, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_1_2),
iterator_ops.IteratorSpec(
tensor_lib.TensorSpec([1, 2], dtypes.float32)))
check_trace( # shape=[1, 2]: no retrace (use the [1, 2] graph)
dataset_ops.make_one_shot_iterator(ds_1_2), None)
check_trace( # shape=[2, 2]: relax to [None, 2] and retrace
dataset_ops.make_one_shot_iterator(ds_2_2),
iterator_ops.IteratorSpec(
tensor_lib.TensorSpec([None, 2], dtypes.float32)))
check_trace( # shape=[3, 2]: no retrace (use the [None, 2] graph)
dataset_ops.make_one_shot_iterator(ds_3_2), None)
check_trace( # shape=[4, 2]: no retrace (use the [None, 2] graph)
dataset_ops.make_one_shot_iterator(ds_4_2), None)
check_trace( # shape=[2, 1]: relax to [None, None] and retrace
dataset_ops.make_one_shot_iterator(ds_2_1),
iterator_ops.IteratorSpec(
tensor_lib.TensorSpec([None, None], dtypes.float32)))
def testCapturesVariables(self):
a = variables.Variable(1.0, trainable=False)
b = variables.Variable(1.0)
cc = [None]
@polymorphic_function.function
def f():
c = cc[0]
if c is None:
c = cc[0] = variables.Variable(1.)
return a + b + c + 1
cf = f.get_concrete_function()
c = cc[0]
captured_variables = {v.ref() for v in (a, b, c)}
trainable_variables = {v.ref() for v in (b, c)}
self.assertEqual({v.ref() for v in cf.variables}, captured_variables)
self.assertEqual({v.ref() for v in cf.trainable_variables},
trainable_variables)
self.assertEqual(cf.variables, cf.graph.variables)
self.assertEqual(cf.trainable_variables, cf.graph.trainable_variables)
def testNestedShapeFunctionRelaxation(self):
traced_shape = None
# The inner function will go through shape relaxation because the shapes it
# receives will be [1], [2], [3], ...
@polymorphic_function.function(reduce_retracing=True)
def bar(x_shape):
nonlocal traced_shape
traced_shape = x_shape._shape_tuple()
return x_shape
# The outer function will not go through shape relaxation because the shapes
# it receives will be [1], [[1]], [[[1]]], ...
@polymorphic_function.function(reduce_retracing=True)
def foo(ones):
return bar(array_ops.shape(ones))
self.assertAllEqual(self.evaluate(foo(array_ops.ones([1]))), [1])
self.assertEqual(traced_shape, (1,))
for rank in range(2, 6):
x_shape = self.evaluate(foo(array_ops.ones([1] * rank)))
self.assertAllEqual(x_shape, [1] * rank)
self.assertEqual(traced_shape, (None,))
def testNoHash(self):
@polymorphic_function.function()
def f(_):
return 1.0
with self.assertRaisesRegex(
TypeError, r'Could not generate a generic TraceType'):
f(set([]))
def testBasicGraphMode(self):
matmul = polymorphic_function.function(math_ops.matmul)
@polymorphic_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = polymorphic_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@polymorphic_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputsGraphMode(self):
matmul = polymorphic_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@polymorphic_function.function()
def pairs_mul(pair_a, pair_b):
return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b))
a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]])
b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]])
out = pairs_mul(pair(a, b), pair(b, a))
expected = pair(
math_ops.matmul(a, b).numpy(),
math_ops.matmul(b, a).numpy())
self.assertAllClose(out, expected)
def testNestedFunctionGraphNotOutOfDate(self):
@polymorphic_function.function
def f():
return constant_op.constant(1.)
class _Model(object):
@polymorphic_function.function
def g(self):
self.f = f.get_concrete_function()
model = _Model()
model.g()
concrete = model.f
weak_g_graph = weakref.ref(model.g.get_concrete_function().graph)
self.assertIs(weak_g_graph(), concrete.graph.outer_graph)
weak_g = weakref.ref(model.g)
del model
self.assertIsNone(weak_g())
self.assertIsNone(weak_g_graph())
self.assertIsNotNone(concrete.graph.outer_graph)
self.assertIs(ops.get_default_graph(), concrete.graph.outer_graph)
def testBasicGraphFunction(self):
matmul = polymorphic_function.function(math_ops.matmul)
@polymorphic_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testGetConcreteFunctionThreadSafety(self):
@polymorphic_function.function
def sq():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
return math_ops.matmul(t, t)
concrete_functions = []
def thread_func(_):
cf = sq.get_concrete_function()
concrete_functions.append(cf)
num_threads = 100
pool = multiprocessing.pool.ThreadPool(num_threads)
_ = pool.map(thread_func, list(range(num_threads)))
self.assertLen(set(concrete_functions), 1)
def testGetConcreteFunctionThreadSafetyWithArgs(self):
@polymorphic_function.function
def add_100(*args):
return math_ops.add_n(args)
p = multiprocessing.pool.ThreadPool(2)
args = (constant_op.constant(1.),) * 100
f1, f2 = p.map(add_100.get_concrete_function, [args] * 2)
# I see about len(args) + max(0, len(args) - 3) arguments expected.
f1(*args)
del f2
def testInputSpecGraphFunction(self):
matmul = polymorphic_function.function(math_ops.matmul)
@polymorphic_function.function
def sq(a):
return matmul(a, a)
sq_op = sq.get_concrete_function(
tensor_lib.TensorSpec((None, None), dtypes.float32))
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out1 = sq_op(t1)
self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy())
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out2 = sq_op(t2)
self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy())
def testNestedInputSpecGraphFunction(self):
matmul = polymorphic_function.function(math_ops.matmul)
@polymorphic_function.function
def sq(mats):
((a, b),) = mats
return matmul(a, b)
sq_op_autonamed = sq.get_concrete_function([(
tensor_lib.TensorSpec((None, None), dtypes.float32),
tensor_lib.TensorSpec((None, None), dtypes.float32),
)])
self.assertEqual([None, None], sq_op_autonamed.output_shapes.as_list())
sq_op = sq.get_concrete_function([(
tensor_lib.TensorSpec((None, None), dtypes.float32, name='first_mat'),
tensor_lib.TensorSpec((None, None), dtypes.float32, name='second_mat'),
)])
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]])
out = sq_op(first_mat=t1, second_mat=t2)
self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy())
self.assertAllEqual(
sq_op_autonamed(t1, t2),
math_ops.matmul(t1, t2).numpy())
def testExecutingStatelessDefunConcurrently(self):
@polymorphic_function.function
def stateless(x):
return math_ops.multiply(2.0, x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@polymorphic_function.function
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
objects = [object() for _ in range(100)]
outputs = [float(out) for out in pool.map(stateless, objects)]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
@test_util.disable_tfrt('b/169431085: This test is flaky on tfrt')
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@polymorphic_function.function
def stateful(x):
v.assign(x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@polymorphic_function.function
def stateful(x):
del x
return v.assign(0.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def testShareRendezvous(self):
# Disable grappler from inlining the functions. Note we run the send & recv
# in graph mode since with eager mode the function should automatically be
# inlined.
context.context().set_optimizer_experimental_options(
{'disable_meta_optimizer': True})
cpu = '/device:CPU:0'
signature = [tensor_lib.TensorSpec([], dtypes.int32)]
@polymorphic_function.function
def send():
x = constant_op.constant(1)
gen_sendrecv_ops.send(x, 'x', cpu, 0, cpu)
return x
send._shared_rendezvous = True # pylint: disable=protected-access
@polymorphic_function.function(input_signature=signature)
def send_body(n):
send()
return n - 1
@polymorphic_function.function
def recv():
return gen_sendrecv_ops.recv(dtypes.int32, 'x', cpu, 0, cpu)
recv._shared_rendezvous = True # pylint: disable=protected-access
@polymorphic_function.function(input_signature=signature)
def recv_body(n):
recv()
return n - 1
@polymorphic_function.function(input_signature=signature)
def cond_fn(n):
return n > 0
# Instead of calling the send & recv functions directly we want to call them
# through a functional while to ensure the rendezvous is shared across the
# while boundary.
@polymorphic_function.function
def fn(n):
functional_ops.While([n], cond_fn.get_concrete_function(),
send_body.get_concrete_function())
return functional_ops.While([n], cond_fn.get_concrete_function(),
recv_body.get_concrete_function())
# Use a graph context since functions will not be automatically inlined
with context.graph_mode(), self.cached_session():
self.evaluate(fn(2))
def disabled_testRandomSeed(self):
@polymorphic_function.function
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
def testNestedInputsGraphFunction(self):
matmul = polymorphic_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@polymorphic_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = a_times_b.get_concrete_function(
pair(
dict(a=tensor_lib.TensorSpec([2, 2], dtypes.float32, 'a')),
dict(b=tensor_lib.TensorSpec([2, 2], dtypes.float32, 'b'))))
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(a=t, b=t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = polymorphic_function.function(math_ops.matmul)
@polymorphic_function.function
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, (tensor_shape.TensorShape([2, 2]), {
'b': tensor_shape.TensorShape([])
}))
self.assertEqual(sq_op.output_dtypes, (dtypes.float32, {
'b': dtypes.float32
}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
def testZipStrictBuiltin(self):
major, minor, _ = platform.python_version_tuple()
if not (major == '3' and int(minor) >= 10):
self.skipTest('strict zip is only supported in Python 3.10+')
@polymorphic_function.function
def foo(x):
return list(zip([x], [x], strict=True))
self.assertEqual(foo(2)[0][0].numpy(), 2)
def testGraphFunctionNoneOutput(self):
@polymorphic_function.function
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@polymorphic_function.function
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@polymorphic_function.function
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@polymorphic_function.function
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
@polymorphic_function.function
def tensor_init():
with self.assertRaisesRegex(ValueError, 'could not be lifted out'):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@polymorphic_function.function
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.also_run_as_tf_function
def testInitScopeTensorInitializationInFunction(self):
@polymorphic_function.function
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
# Note: this variable bypasses tf.function's variable creation
# requirements by bypassing variable_creator_scope by using
# ResourceVariable instead of Variable.
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
self.assertAllEqual(value, 2.0)
@test_util.run_in_graph_and_eager_modes
def testGetConcreteFunctionCreatesVariables(self):
v_holder = []
@polymorphic_function.function
def tensor_init():
if not v_holder:
v_holder.append(variables.Variable(5.))
return v_holder[0].read_value()
concrete = tensor_init.get_concrete_function()
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(5., self.evaluate(concrete()))
self.assertAllEqual(5., self.evaluate(tensor_init()))
  def testDefunShapeInferenceWithCapturedResourceVariable(self):
    """Shape inference sees the full shape of a captured resource variable."""
    v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
    def f():
      x = constant_op.constant([[1, 2], [3, 4]])
      # matmul's static output shape is only [2, 2] if the captured
      # variable's shape was propagated into the function graph.
      out = math_ops.matmul(v, x)
      self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
      # We do not return v directly since the tensor conversion function of
      # ResourceVariable returns the read value and not the resource itself.
      return v._handle
    compiled = polymorphic_function.function(f)
    var_handle = compiled()
    # The handle itself is a scalar resource tensor ...
    self.assertEqual(var_handle.dtype, dtypes.resource)
    self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
    # ... but reading through it recovers the variable's value shape.
    var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
    self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
  def testShapeInferenceForMoreSpecificInput(self):
    """Calling a signature-constrained function with a more specific shape works."""
    def f(a):
      return array_ops.reshape(a, [-1, 3])
    # Signature only fixes the dtype; the shape is left unknown.
    signature = [tensor_lib.TensorSpec(None, dtypes.float32)]
    compiled = polymorphic_function.function(f, input_signature=signature)
    @polymorphic_function.function
    def use_f():
      inputs = array_ops.zeros([10, 10, 3])
      self.assertAllEqual(f(inputs).shape, compiled(inputs).shape)
    use_f()
  def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
    """Graph-mode variant of captured-resource-variable shape inference."""
    with context.graph_mode():
      v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
      def f():
        x = constant_op.constant([[1, 2], [3, 4]])
        out = math_ops.matmul(v, x)
        self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
        # We do not return v directly since the tensor conversion function of
        # ResourceVariable returns the read value and not the resource itself.
        return v._handle
      compiled = polymorphic_function.function(f)
      var_handle = compiled()
      # Handle is a scalar resource tensor; the read recovers the value shape.
      self.assertEqual(var_handle.dtype, dtypes.resource)
      self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
      var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
      self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
  def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
    """Shape inference works for a captured (non-resource) Variable in graph mode."""
    with context.graph_mode():
      v = variables.Variable([[1, 2], [3, 4]])
      def f():
        x = constant_op.constant([[1, 2], [3, 4]])
        out = math_ops.matmul(v, x)
        self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
      # Check that shape inference works while creating the defun
      compiled = polymorphic_function.function(f)
      compiled()
  def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self):
    """Element shape info survives capturing a TensorList into a function."""
    with context.graph_mode():
      tensor_list = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32,
          element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
      tensor_list = list_ops.tensor_list_push_back(tensor_list,
                                                   constant_op.constant(1.0))
      tensor_list = list_ops.tensor_list_push_back(tensor_list,
                                                   constant_op.constant(2.0))
      def f():
        # Popping inside the traced function must still report a scalar
        # element shape.
        tl, value = list_ops.tensor_list_pop_back(
            tensor_list, element_dtype=dtypes.float32)
        self.assertEqual(value.shape, tensor_shape.TensorShape([]))
        return tl
      compiled = polymorphic_function.function(f)
      output_tensor_list = compiled()
      _, value = list_ops.tensor_list_pop_back(
          output_tensor_list, element_dtype=dtypes.float32)
      self.assertEqual(value.shape, tensor_shape.TensorShape([]))
  def testRunMetadata(self):
    """Run metadata collection captures the partition graph of a tf.function call."""
    @polymorphic_function.function
    def f(x):
      return x * x
    with ops.device('cpu:0'):
      context.enable_run_metadata()
      f(constant_op.constant(1.0))
      run_metadata = context.export_run_metadata()
      context.disable_run_metadata()
    # A single CPU call should produce exactly one partition graph.
    self.assertLen(run_metadata.partition_graphs, 1)
  def testGraphModeCaptureVariable(self):
    """A variable held on an object is captured when its method is wrapped."""
    with context.graph_mode(), self.cached_session():
      class HasAVar:
        def __init__(self):
          self.v = resource_variable_ops.ResourceVariable(1.0)
        def call(self):
          return self.v * 2
      o = HasAVar()
      self.evaluate(variables.global_variables_initializer())
      call = polymorphic_function.function(o.call)
      op = call()
      self.assertAllEqual(self.evaluate(op), 2.0)
def testGraphModeManyFunctions(self):
with ops.Graph().as_default(), self.cached_session():
@polymorphic_function.function
def f(x):
return x * x
@polymorphic_function.function
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)), 5.0)
def testDict(self):
@polymorphic_function.function
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testWeakrefInputsRejected(self):
@polymorphic_function.function
def f(x):
return x
class Dummy:
pass
o = Dummy()
wr = weakref.ref(o)
with self.assertRaisesRegex(TypeError, 'weakref'):
f(wr)
def testTensorConversionWithDefun(self):
@polymorphic_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@polymorphic_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
@polymorphic_function.function
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
  def testCallShape(self):
    """The static shape of an inner tf.function's result is visible to callers."""
    @polymorphic_function.function
    def f(x):
      return x + 1
    @polymorphic_function.function
    def g(x):
      x = f(x)
      # A scalar in, a scalar out: shape inference should report [].
      self.assertEqual(x.shape.as_list(), [])
      return None
    g(constant_op.constant(1.0))
  def testNestedDefunWithNoOutputAndTapedInput(self):
    """A nested tf.function that takes a variable but returns nothing runs."""
    three = resource_variable_ops.ResourceVariable(3.0, name='v')
    @polymorphic_function.function
    def f(x):
      # This function intentionally takes a taped variable as input,
      # but does not return any values
      math_ops.add(x, three)
    @polymorphic_function.function
    def g(x):
      y = math_ops.add(x, three)
      f(y)
    g(three)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = polymorphic_function.function(sum_gather)
self.assertAllEqual(sum_gather(), defined())
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor', sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
]) # pyformat: disable
def testReturnCompositeTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@polymorphic_function.function(input_signature=input_signature)
def f():
return input_ct
output_ct = f()
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
('RaggedTensorRaggedRank1WithSignature',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]},
[ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]),
('RaggedTensorRaggedRank2WithSignature',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]},
[ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32)]),
('SparseTensorWithSignature',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]},
[sparse_tensor.SparseTensorSpec([None], dtypes.int32)]),
]) # pyformat: disable
def testCompositeAsArgumentTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@polymorphic_function.function(input_signature=input_signature)
def f(x):
return x
output_ct = f(input_ct)
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
  def testTracedCompositeDiscardsShapeInfo(self):
    """Placeholders for composite args mask element-count shape info."""
    # SparseTensorSpec intentionally excludes info about the number of elements
    # that are in a sparse tensor (which is recorded as st.indices.shape[0] and
    # st.values.shape[0]). Similarly, RaggedTensorSpec intentionally excludes
    # info about the total number of values in a RaggedTensor (stored as
    # rt.values.shape[0]). This test checks that the placeholders created by
    # tf.function() properly mask this shape info.
    @polymorphic_function.function
    def f(rt, st):
      self.assertEqual(st.indices.shape.as_list()[:1], [None])
      self.assertEqual(st.values.shape.as_list(), [None])
      return (rt, st)

    rt = ragged_factory_ops.constant([[1, 2], [3]])
    st = sparse_tensor.SparseTensor([[0]], [0], [10])
    f(rt, st)
  @test_util.run_gpu_only
  def testFunctionOnDevice(self):
    """A tf.function accepts GPU-resident inputs and its output can be copied back."""
    x = constant_op.constant([1.]).gpu()
    f = polymorphic_function.function(math_ops.add)
    y = f(x, x).cpu()
    self.assertAllEqual(y, [2.])
  @test_util.run_gpu_only
  @test_util.run_in_graph_and_eager_modes
  def testOpInFunctionWithConflictingResourceInputs(self):
    """Mixing CPU and GPU resource inputs to one op fails placement."""
    with ops.device('/cpu:0'):
      v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0],
                                                     name='cpu')
      v_also_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0],
                                                          name='also_cpu')
    with ops.device('/gpu:0'):
      v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0],
                                                     name='gpu')
    @polymorphic_function.function
    def resource_apply_adam():
      # One optimizer op consuming variables pinned to different devices
      # cannot be placed.
      gen_training_ops.resource_apply_adam(
          v_cpu.handle,
          v_gpu.handle,
          v_also_cpu.handle,
          1.0,  # beta1_power
          1.0,  # beta2_power
          1.0,  # learning_rate
          1.0,  # beta1
          1.0,  # beta2
          1.0,  # epsilon,
          [1.0, 1.0, 1.0],  # grad
          False)  # use_locking
      return 1
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        'Cannot place the graph because a reference or resource edge connects '
        'colocation groups with incompatible assigned devices'):
      if not context.executing_eagerly():
        self.evaluate(variables.global_variables_initializer())
      self.evaluate(resource_apply_adam())
  @test_util.run_gpu_only
  def testFunctionHandlesInputsOnDifferentDevices(self):
    """Inputs spread across CPU and GPU are handled by a tf.function call."""
    # The Reshape op requires the shape tensor to be placed in host memory.
    reshape = polymorphic_function.function(array_ops.reshape)
    value = constant_op.constant([1., 2.]).gpu()
    shape = constant_op.constant([2, 1])
    reshaped = reshape(value, shape).cpu()
    self.assertAllEqual(reshaped, [[1], [2]])
  @test_util.run_gpu_only
  def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
    """A host-memory input placed on GPU does not make the call fail."""
    # The Reshape op requires the shape tensor to be placed in host memory.
    reshape = polymorphic_function.function(array_ops.reshape)
    value = constant_op.constant([1., 2.])
    shape = constant_op.constant([2, 1]).gpu()
    reshape(value, shape)  # No error is raised
def testNoneOutput(self):
@polymorphic_function.function
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
  def testNestedFunctions(self):
    """A tf.function may call a legacy Defun-decorated function."""
    # TensorFlow function (which is what would be used in TensorFlow graph
    # construction).
    @tf_function.Defun(dtypes.int32, dtypes.int32)
    def add(a, b):
      return math_ops.add(a, b)
    @polymorphic_function.function
    def add_one(x):
      return add(x, 1)
    self.assertAllEqual(3, add_one(constant_op.constant(2)))
  def testVariableCaptureInNestedFunctions(self):
    """A variable captured by an inner tf.function is readable via an outer one."""
    v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
    @polymorphic_function.function
    def inner_read():
      return v.read_value()
    @polymorphic_function.function
    def outer():
      return inner_read()
    self.assertEqual(1, int(outer()))
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@polymorphic_function.function
def read():
return t
self.assertEqual(1, int(read()))
  def testReturnCapturedGraphTensor(self):
    """A captured graph-mode tensor can be returned from a tf.function."""
    with context.graph_mode(), self.cached_session():
      t = constant_op.constant(1)
      @polymorphic_function.function
      def read():
        return t
      self.assertEqual(1, int(self.evaluate(read())))
  def testConcreteFunctionType(self):
    """A concrete function's FunctionType records parameters, captures, output."""
    y = constant_op.constant(1)
    @polymorphic_function.function
    def foo(x):
      return {'input': x, 'capture': y}
    cf = foo.get_concrete_function(tensor_lib.TensorSpec([], dtypes.int32))
    x = constant_op.constant(2)
    output = cf(x)
    self.assertEqual(set(output.keys()), {'input', 'capture'})
    self.assertEqual(output['input'].numpy(), 2)
    self.assertEqual(output['capture'].numpy(), 1)
    # Exactly one declared parameter, constrained to a scalar int32 spec.
    parameters = list(cf.function_type.parameters.values())
    self.assertLen(parameters, 1)
    self.assertEqual(parameters[0].name, 'x')
    self.assertEqual(
        parameters[0].type_constraint,
        tensor_lib.TensorSpec([], dtypes.int32),
    )
    # The captured tensor is keyed by its Python object id.
    captures = cf.function_type.captures
    self.assertLen(captures, 1)
    self.assertEqual(captures[id(y)], tensor_lib.TensorSpec([], dtypes.int32))
    output = cf.function_type.output
    self.assertEqual(output, trace_type.from_value({'input': x, 'capture': y}))
  def testSequenceInputs(self):
    """A list-of-tensors argument flows through a wrapped library function."""
    clip_by_global_norm = polymorphic_function.function(
        clip_ops.clip_by_global_norm)
    t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
    clipped_list, global_norm = clip_by_global_norm(t_list,
                                                    constant_op.constant(.2))
    for t in clipped_list:
      self.assertIsInstance(t, tensor_lib.Tensor)
    self.assertIsInstance(global_norm, tensor_lib.Tensor)
  def testNestedSequenceInputs(self):
    """Deeply nested list/tuple inputs and outputs keep their structure."""
    def my_op(inputs):
      a, b, c = inputs
      e, f = b
      g, h = e
      return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
    my_eager_op = polymorphic_function.function(my_op)
    ret = my_eager_op([
        constant_op.constant(1),
        [(constant_op.constant(2), constant_op.constant(3)),
         constant_op.constant(4)],
        constant_op.constant(5)
    ])
    self.assertLen(ret, 2)
    self.assertAllEqual(ret[0][0], 2)
    self.assertAllEqual(ret[0][1][0][0], 8)
    self.assertAllEqual(ret[0][1][0][1], 4)
    # Tuples are preserved as tuples, not converted to lists.
    self.assertIsInstance(ret[0][1][0], tuple)
    self.assertAllEqual(ret[0][1][1], 6)
    self.assertAllEqual(ret[0][2], 10)
    self.assertAllEqual(ret[1], 15)
  def testVariableNamesRespectNameScopesWithDefun(self):
    """A name_scope active during tracing prefixes variable names."""
    @polymorphic_function.function
    def create_variable():
      with ops.name_scope('foo', skip_on_eager=False):
        v = resource_variable_ops.ResourceVariable(0.0, name='bar')
      self.assertEqual(v.name, 'foo/bar:0')
    create_variable()
  def testVariableNamesRespectNameScopesWithDefunInGraph(self):
    """Graph-mode variant: name_scope prefixes variable names during tracing."""
    with context.graph_mode():
      @polymorphic_function.function
      def create_variable():
        with ops.name_scope('foo', skip_on_eager=False):
          v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
        self.assertEqual(v.name, 'foo/bar:0')
      with ops.get_default_graph().as_default():
        create_variable()
  @test_util.run_in_graph_and_eager_modes
  def testVariablesPlacedOnOutsideDevice(self):
    """A variable created during tracing inherits the caller's device scope."""
    class _Obj(object):
      def __init__(self):
        self.v = None
      @polymorphic_function.function
      def f(self):
        # Lazily create the variable on first trace.
        if self.v is None:
          self.v = variables.Variable(1.)
        return self.v + 1.
    has_device = _Obj()
    with ops.device('cpu:0'):
      has_device.f()
    self.assertIn('CPU', has_device.v.device)
  @test_util.run_in_graph_and_eager_modes
  def testCallingGraphFunctionOnDifferentDevice(self):
    """A concrete function traced on one device is callable on others."""
    def func():
      return constant_op.constant(0)
    defined = polymorphic_function.function(func)
    with ops.device('cpu:0'):
      cpu_graph_function = defined.get_concrete_function()
    with ops.device('cpu:0'):
      self.assertEqual(
          self.evaluate(cpu_graph_function()), self.evaluate(func()))
    # The cpu:0-traced function still runs under other device scopes.
    with ops.device('cpu:1'):
      self.assertEqual(0., self.evaluate(cpu_graph_function()))
    with ops.device(None):
      self.assertEqual(0., self.evaluate(cpu_graph_function()))
    default_graph_function = defined.get_concrete_function()
    self.assertEqual(
        self.evaluate(default_graph_function()), self.evaluate(func()))
    with ops.device('cpu:1'):
      self.assertEqual(0., self.evaluate(default_graph_function()))
  @test_util.run_gpu_only
  @test_util.run_in_graph_and_eager_modes
  def testColocateWithRespected(self):
    """colocate_with influences where a tf.function call is placed."""
    # TODO(b/113291792): Use multiple CPUs instead of a GPU.
    with ops.device('cpu:0'):
      x = array_ops.identity(1.0)
    with ops.device('gpu:0'):
      y = array_ops.identity(1.0)
    @polymorphic_function.function
    def foo():
      return test_ops.device_placement_op()
    with ops.colocate_with(x):
      self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
    with ops.colocate_with(y):
      self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
  @parameterized.parameters([(True), (False)])
  def testVariablesAreTracked(self, reduce_retracing):
    """A captured variable's current value is read at call time, not trace time."""
    v = resource_variable_ops.ResourceVariable(1.0)
    def foo(x):
      return v * x
    defined = polymorphic_function.function(
        foo, reduce_retracing=reduce_retracing)
    x = constant_op.constant([1.0])
    self.assertEqual(1., self.evaluate(defined(x)))
    # Assigning a new value must be visible to subsequent calls.
    v.assign(2.)
    x = constant_op.constant([1.0, 2.0])
    self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
  def testInputSignatureMustBeSequenceOfTensorSpecs(self):
    """A non-TensorSpec entry in input_signature raises TypeError."""
    def foo(a, b):
      del a
      del b
    # Signatures must consist exclusively of `TensorSpec` objects.
    signature = [(2, 3), tensor_lib.TensorSpec([2, 3], dtypes.float32)]
    with self.assertRaisesRegex(TypeError, 'input_signature.*nested sequence'):
      polymorphic_function.function(foo, input_signature=signature)
  @test_util.run_in_graph_and_eager_modes
  def testInputsIncompatibleWithSignatureRaisesError(self):
    """Calls violating the declared input_signature raise TypeError."""
    def foo(a):
      return a
    signature = [tensor_lib.TensorSpec(shape=(2,), dtype=dtypes.float32)]
    defined = polymorphic_function.function(foo, input_signature=signature)
    # Valid call
    defined(array_ops.ones([2]))
    # Invalid shapes.
    with self.assertRaisesRegex(
        TypeError, r'Can not cast .*dtype=tf.int32.* to .*dtype=tf.float32.*'
    ):
      defined(array_ops.ones([3], dtype=dtypes.int32))
    # Invalid shapes.
    with self.assertRaisesRegex(TypeError, 'Can not cast.*'):
      defined(array_ops.ones([3]))
    with self.assertRaisesRegex(TypeError, 'Can not cast.*'):
      defined(array_ops.ones([2, 1]))
    # Wrong number of arguments.
    with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
      defined(array_ops.ones([2]), array_ops.ones([2]))
    with self.assertRaisesRegex(TypeError, 'missing a required argument'):
      defined()
    # get_concrete_function also enforces the signature.
    with self.assertRaisesRegex(
        TypeError, r'Can not cast .*shape=\(3,\).* to .*shape=\(2,\).*'
    ):
      defined.get_concrete_function(
          tensor_lib.TensorSpec(shape=(3,), dtype=dtypes.float32))
  def testMismatchedConcreteSignatureRaisesError(self):
    """Calling a concrete function with a mismatched argument kind fails."""
    @polymorphic_function.function
    def run_test():
      @polymorphic_function.function
      def f(x):
        return x
      # Traced with a Python int, called with a tensor: binding fails.
      with self.assertRaisesRegex(
          TypeError, 'Binding inputs to tf.function failed .*'):
        f.get_concrete_function(1)(constant_op.constant(1))
      # Traced with a tensor, called with a compatible Python int: allowed.
      f.get_concrete_function(constant_op.constant(1))(1)
      # Traced with one int, called with a different int: binding fails.
      with self.assertRaisesRegex(
          TypeError, 'Binding inputs to tf.function failed .*'):
        f.get_concrete_function(1)(2)
    run_test()
  def testInputSignatureConversionWithDefaultArg(self):
    """A default argument covered by the input_signature can still be overridden."""
    def foo(a, training=True):
      if training:
        return a
      else:
        return -1.0 * a
    signature = [
        tensor_lib.TensorSpec([], dtypes.float32),
        tensor_lib.TensorSpec([], dtypes.bool),
    ]
    defined = polymorphic_function.function(foo, input_signature=signature)
    a = constant_op.constant(1.0)
    self.assertAllEqual(a.numpy(), defined(a))
    self.assertAllEqual(a.numpy(), defined(a, training=True))
    self.assertAllEqual(-a.numpy(), defined(a, training=False))
  def testVariableSpecWithInputSignature(self):
    """VariableSpec entries are rejected in input_signature."""
    def f(v):
      v.assign_add(1)
    signature = [
        resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
    ]
    with self.assertRaisesRegex(TypeError,
                                "input_signature doesn't support VariableSpec"):
      polymorphic_function.function(f, input_signature=signature)
  def testDefuningInstanceMethod(self):
    """tf.function works as a decorator on an instance method."""
    integer = constant_op.constant(2, dtypes.int64)
    class Foo:
      def one(self, tensor):
        return tensor
      @polymorphic_function.function
      def two(self, tensor, other=integer):
        return self.one(tensor), other
    foo = Foo()
    t = constant_op.constant(1.0)
    one, two = foo.two(t)
    self.assertEqual(one.numpy(), 1.0)
    self.assertEqual(two.numpy(), 2)
  def testDefuningInstanceMethodWithDefaultArgument(self):
    """A tensor default argument on a decorated method is honored."""
    integer = constant_op.constant(2, dtypes.int64)
    class Foo:
      @polymorphic_function.function
      def func(self, other=integer):
        return other
    foo = Foo()
    self.assertEqual(foo.func().numpy(), int(integer))
  def testPythonCallWithSideEffects(self):
    """Python side effects run only during tracing, not on cached calls."""
    state = []
    @polymorphic_function.function
    def side_effecting_function():
      state.append(0)
    side_effecting_function()
    self.assertAllEqual(state, [0])
    # The second invocation should call the graph function, which shouldn't
    # trigger the list append.
    side_effecting_function()
    self.assertAllEqual(state, [0])
    # Whereas calling the python function directly should create a side-effect.
    side_effecting_function.python_function()
    self.assertAllEqual(state, [0, 0])
  def testFunctionWithNestedFunctionCallAndSideEffects(self):
    """Variable side effects in inlined nested calls are all executed."""
    v1 = variables.Variable(1.0)
    v2 = variables.Variable(1.0)
    @polymorphic_function.function
    def add_one(a):
      a.assign_add(1.0)
    # Grappler will inline calls to `add_one` into the function body, we check
    # that all side-effects were executed.
    @polymorphic_function.function
    def side_effecting_function(a, b):
      add_one(a)
      add_one(b)
      return a + b
    result = side_effecting_function(v1, v2)
    self.assertEqual(result.numpy(), 4.0)
  def testRegisterConcreteFunction(self):
    """Pre-registered concrete functions are reused, not re-added to the graph."""
    @polymorphic_function.function
    def py_add(x, y):
      return math_ops.add(x, y)
    py_add(array_ops.ones([]), array_ops.ones([]))
    add = py_add.get_concrete_function(
        tensor_lib.TensorSpec(None, dtypes.float32),
        tensor_lib.TensorSpec(None, dtypes.float32))
    @polymorphic_function.function
    def py_composite(x, y):
      return x, add(x, y)
    py_composite(array_ops.ones([]), array_ops.ones([]))
    composite = py_composite.get_concrete_function(
        tensor_lib.TensorSpec(None, dtypes.float32),
        tensor_lib.TensorSpec(None, dtypes.float32))
    with context.graph_mode(), self.cached_session():
      with ops.get_default_graph().as_default():
        t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        composite.add_to_graph()
        composite.add_gradient_functions_to_graph()
        graph = ops.get_default_graph()
        # pylint: disable=protected-access
        self.assertLen(graph._functions, 6)
        # two sets of functions, each of them are (inference, forward, backward)
        functions = list(graph._functions.values())
        captured_function_names = [
            f.cached_definition.signature.name for f in functions
        ]
        expected_func_name_regex = [
            '.*inference.*py_composite.*',
            '.*inference.*py_add.*',
            '.*forward.*py_composite.*',
            '.*forward.*py_add.*',
            '.*inference.*backward.*py_composite.*',
            '.*inference.*backward.*py_add.*',
        ]
        for expected, found in zip(expected_func_name_regex,
                                   captured_function_names):
          self.assertRegex(found, expected)
        composite_t, composite_double = composite(t, t)
        double = add(t, t)
        self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
        self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
        self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
        # Make sure the pre registered function is used, and no other function
        # is added.
        self.assertLen(graph._functions, 6)
  def testEagerCaptures(self):
    """Small tensors are captured as constants, large ones as placeholders."""
    with context.eager_mode():
      # 256 elements exceeds the const-capture threshold -> Placeholder.
      large_tensor = array_ops.ones(shape=(256,))
      self.assertGreater(256, capture_container._EAGER_CONST_THRESHOLD)
      # 4 elements is within the threshold -> Const.
      small_tensor = array_ops.ones(shape=(4,))
      self.assertLessEqual(4, capture_container._EAGER_CONST_THRESHOLD)
      # Variables are always captured as placeholders.
      v = resource_variable_ops.ResourceVariable(0.0)
    for captured, op_type in [(large_tensor, 'Placeholder'),
                              (small_tensor, 'Const'), (v, 'Placeholder')]:
      @polymorphic_function.function
      def test_fn():
        return captured + 1  # pylint: disable=cell-var-from-loop
      g = test_fn.get_concrete_function().graph
      internal_captures = g.internal_captures
      self.assertLen(internal_captures, 1)
      self.assertEqual(internal_captures[0].op.type, op_type)
  @parameterized.parameters([(True), (False)])
  def testVariableAliasIdInStructuredInputSignature(self, reduce_retracing):
    """Passing the same variable twice shares one alias_id; distinct ones differ."""
    @polymorphic_function.function(reduce_retracing=reduce_retracing)
    def foo(v1, v2):
      return v1 + v2
    v1 = resource_variable_ops.ResourceVariable(1.0)
    v2 = resource_variable_ops.ResourceVariable(2.0)
    # Same variable in both slots: both specs carry alias_id=0.
    graph_function = foo.get_concrete_function(v1, v1)
    args_sig, _ = graph_function.graph.structured_input_signature
    expected_spec = resource_variable_ops.VariableSpec([], alias_id=0)
    self.assertLen(args_sig, 2)
    self.assertEqual(args_sig[0], expected_spec)
    self.assertEqual(args_sig[1], expected_spec)
    # Two distinct variables: specs get distinct alias ids 0 and 1.
    graph_function = foo.get_concrete_function(v1, v2)
    args_sig, _ = graph_function.graph.structured_input_signature
    expected_spec1 = resource_variable_ops.VariableSpec([], alias_id=0)
    expected_spec2 = resource_variable_ops.VariableSpec([], alias_id=1)
    self.assertLen(args_sig, 2)
    self.assertEqual(args_sig[0], expected_spec1)
    self.assertEqual(args_sig[1], expected_spec2)
  def testStructuredSignatureAndMultipleVariables(self):
    """A concrete function traced with aliased variables rejects distinct ones."""
    self.skipTest('b/209081027: Enable this test after Variable becomes a '
                  'CompositeTensor and Variable gets expand to handle tensor.')
    @polymorphic_function.function
    def foo(v1, v2):
      return v1 + v2
    v1 = resource_variable_ops.ResourceVariable(1.0)
    v2 = resource_variable_ops.ResourceVariable(2.0)
    graph_function = foo.get_concrete_function(v1, v1)
    self.assertAllEqual(graph_function(v1, v1), 2.0)
    with self.assertRaises(TypeError):
      graph_function(v1, v2)
  def _total_function_cache_def_func(self, defined):
    """Returns all concrete functions traced so far for `defined`."""
    return defined._list_all_concrete_functions()  # pylint: disable=protected-access
  @parameterized.parameters([(True), (False)])
  def testVariableRetracingOnDtypeChanges(self, reduce_retracing):
    """Variable arguments with a new dtype trigger a retrace."""
    @polymorphic_function.function(reduce_retracing=reduce_retracing)
    def defined(a, b):
      return a + b
    x1 = resource_variable_ops.ResourceVariable(0.0)
    x2 = resource_variable_ops.ResourceVariable(0.0)
    defined(x1, x2)
    self.assertLen(self._total_function_cache_def_func(defined), 1)
    # Should expect retracing for new dtypes
    y1 = resource_variable_ops.ResourceVariable(0)
    y2 = resource_variable_ops.ResourceVariable(1)
    defined(y1, y2)
    self.assertLen(self._total_function_cache_def_func(defined), 2)
  def testVariableRetracingDtypeShape(self):
    """Variable arguments with a new shape trigger a retrace."""
    @polymorphic_function.function
    def defined(a, b):
      return a + b
    x1 = resource_variable_ops.ResourceVariable(0.0)
    x2 = resource_variable_ops.ResourceVariable(0.0)
    defined(x1, x2)
    self.assertLen(self._total_function_cache_def_func(defined), 1)
    # Rank-1 variables: one new trace.
    y1 = resource_variable_ops.ResourceVariable([0.0, 1.0])
    y2 = resource_variable_ops.ResourceVariable([0.0, 1.0])
    defined(y1, y2)
    self.assertLen(self._total_function_cache_def_func(defined), 2)
    # Rank-2 variables: another new trace.
    z1 = resource_variable_ops.ResourceVariable([[0.0, 1.0]])
    z2 = resource_variable_ops.ResourceVariable([[0.0, 1.0]])
    defined(z1, z2)
    self.assertLen(self._total_function_cache_def_func(defined), 3)
def testFunctionModifiesInputList(self):
# Tests on `list` methods that do in place modification, except `list.sort`
# since it cannot even be "defunned" in the first place
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = '.*() should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` is a method that is in Py3 but not Py2
if sys.version.startswith('3'):
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegex(ValueError, expected_msg):
@polymorphic_function.function
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
  def testFunctionModifiesInputDict(self):
    """In-place mutation of a dict argument inside tf.function raises ValueError."""
    def get_dict():
      return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
    expected_msg = '.* should not modify'
    with self.assertRaisesRegex(ValueError, expected_msg):
      @polymorphic_function.function
      def clear(m):
        m.clear()
      clear(get_dict())
    with self.assertRaisesRegex(ValueError, expected_msg):
      @polymorphic_function.function
      def pop(m):
        m.pop('t1')
      pop(get_dict())
    with self.assertRaisesRegex(ValueError, expected_msg):
      @polymorphic_function.function
      def popitem(m):
        m.popitem()
      popitem(get_dict())
    with self.assertRaisesRegex(ValueError, expected_msg):
      @polymorphic_function.function
      def update(m):
        m.update({'t1': constant_op.constant(3.)})
      update(get_dict())
    with self.assertRaisesRegex(ValueError, expected_msg):
      @polymorphic_function.function
      def setdefault(m):
        m.setdefault('t3', constant_op.constant(3.))
      setdefault(get_dict())
  def testFunctionModifiesInputNest(self):
    """Structural mutation of a nested argument is detected and rejected."""
    with self.assertRaisesRegex(ValueError, 'modify.* should not modify'):
      @polymorphic_function.function
      def modify(n):
        n[0]['t1'].append(constant_op.constant(1.))
      nested_input = [{
          't1': [constant_op.constant(0.),
                 constant_op.constant(1.)],
      },
                      constant_op.constant(2.)]
      modify(nested_input)
    with self.assertRaisesRegex(ValueError,
                                'modify_same_flat.* should not modify'):
      # The flat list doesn't change whereas the true structure changes
      @polymorphic_function.function
      def modify_same_flat(n):
        n[0].append(n[1].pop(0))
      nested_input = [[constant_op.constant(0.)],
                      [constant_op.constant(1.),
                       constant_op.constant(2.)]]
      modify_same_flat(nested_input)
  def testFunctionStackInErrorMessage(self):
    """Runtime errors report the nested tf.function call chain."""
    if context.executing_eagerly():
      # TODO(b/122736651): Remove this skipTest once fixed.
      self.skipTest('Error interpolation is not working when function is '
                    'invoked without PartitionedCallOp.')
    @polymorphic_function.function()
    def fn3(x):
      return x + 2
    @polymorphic_function.function()
    def fn2(x):
      check_ops.assert_equal(fn3(x), 3)
      return 2
    @polymorphic_function.function()
    def fn(x):
      return fn2(x)
    with self.assertRaises(errors.InvalidArgumentError) as cm:
      fn(2)
    e = cm.exception
    # The chain leading to the failing assert is shown; fn3 (which succeeded)
    # is not.
    self.assertIn('fn -> fn2', e.message)
    self.assertIn('node assert_equal/Assert/Assert (defined at', e.message)
    self.assertNotIn('fn3', e.message)
  @test_util.run_gpu_only
  def testFunctionIsNotPinned(self):
    """Tests that functions aren't pinned to the CPU by the eager runtime."""
    seed1, seed2 = 79, 25
    shape = constant_op.constant([4, 7])
    dtype = dtypes.float32
    @polymorphic_function.function
    def func():
      with ops.device('GPU:0'):
        return gen_random_ops.random_standard_normal(
            shape, dtype=dtype, seed=seed1, seed2=seed2)
    with ops.device('GPU:0'):
      x = func()
      # The result must have been produced on the GPU.
      self.assertRegex(x.device, 'GPU')
  def testLimitedRetracingWithCompositeTensors(self):
    """Ragged inputs with matching specs reuse traces; new structures retrace."""
    trace_count = [0]
    @polymorphic_function.function
    def f(x):
      trace_count[0] += 1
      return x
    for i in range(10):
      # Same spec each iteration -> only the first call traces.
      f(ragged_factory_ops.constant([[1, 2], [i]]))
      f(ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]]))
      f(ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]]))
      self.assertEqual(trace_count[0], 3)
  def testCompositeTensorsWithReducedRetracing(self):
    """A ragged tensor round-trips through a reduce_retracing tf.function."""
    inp = ragged_factory_ops.constant([[1, 2], [3]])
    @polymorphic_function.function(reduce_retracing=True)
    def f(x):
      return x
    output = f(inp)
    self.assertTrue(math_ops.reduce_all(math_ops.equal(inp, output)))
  def testMultipleInputsWithReducedRetracing(self):
    """Mixed ragged tensors and variables pass through with reduce_retracing."""
    tensor1 = ragged_factory_ops.constant([[1, 2], [3]])
    tensor2 = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]])
    variable1 = variables.Variable(1.0)
    variable2 = variables.Variable(2.0)
    @polymorphic_function.function(reduce_retracing=True)
    def f(a, b, c, d):
      return [a, b, c, d]
    output = f(tensor1, tensor2, variable1, variable2)
    self.assertTrue(math_ops.reduce_all(math_ops.equal(tensor1, output[0])))
    self.assertTrue(math_ops.reduce_all(math_ops.equal(tensor2, output[1])))
    self.assertTrue(math_ops.reduce_all(math_ops.equal(variable1, output[2])))
    self.assertTrue(math_ops.reduce_all(math_ops.equal(variable2, output[3])))
def test_concrete_function_shape_mismatch(self):
  """Shape mismatches are tolerated from eager but rejected inside a graph."""

  @polymorphic_function.function
  def f(argument_name):
    return argument_name + 1.

  f_concrete = f.get_concrete_function(constant_op.constant([1.]))
  # Calling a function from eager doesn't do any shape checking above what
  # kernels do while executing.
  self.assertAllEqual([2., 3.],
                      f_concrete(constant_op.constant([1., 2.])).numpy())

  @polymorphic_function.function
  def g():
    f_concrete(constant_op.constant([1., 2.]))

  # From inside another tf.function the spec is checked, so the (2,) input
  # cannot be cast to the concrete function's (1,) spec.
  with self.assertRaisesRegex(
      TypeError,
      r'Can not cast TensorSpec\(shape=\(2,\).* to TensorSpec\(shape=\(1,\)',
  ):
    g()
@test_util.run_in_graph_and_eager_modes
def test_shape_inference_with_symbolic_shapes(self):
  """Static shapes are inferred through a concrete fn with unknown-shape specs."""

  @polymorphic_function.function
  def _uses_symbolic_shapes(w, x, y):
    x = array_ops.identity(x, name='name_collision')
    x = array_ops.transpose(x, [1, 0, 2])
    x_batch = array_ops.shape(x)[0]
    y_batch = array_ops.shape(y)[0]
    y *= w
    n = y_batch // x_batch
    return array_ops.reshape(y, [n, x_batch, -1])

  conc = _uses_symbolic_shapes.get_concrete_function(
      tensor_lib.TensorSpec(None, dtypes.float32),
      tensor_lib.TensorSpec(None, dtypes.float32),
      tensor_lib.TensorSpec(None, dtypes.float32))

  @polymorphic_function.function
  def _call_concrete():
    # The op name deliberately collides with one inside the concrete
    # function to exercise name uniquification during inlining.
    c = constant_op.constant(1.)
    array_ops.identity(c, name='name_collision')
    output1 = conc(
        array_ops.ones([2]), array_ops.ones([5, 4, 2]),
        array_ops.ones([20, 2]))
    self.assertEqual([5, 4, 2], output1.shape)
    output2 = conc(
        array_ops.ones([3]), array_ops.ones([5, 4, 3]),
        array_ops.ones([40, 3]))
    self.assertEqual([10, 4, 3], output2.shape)
    return output1, output2

  output1, output2 = _call_concrete()
  self.assertEqual((5, 4, 2), self.evaluate(output1).shape)
  self.assertEqual((10, 4, 3), self.evaluate(output2).shape)
def testAutoGraphContext(self):
  """AutoGraph is enabled inside tf.function and restored afterwards."""

  @polymorphic_function.function
  def test_fn():
    self.assertEqual(ag_ctx.control_status_ctx().status,
                     ag_ctx.Status.ENABLED)

  prev_status = ag_ctx.control_status_ctx().status
  test_fn()
  # Tracing must not leak the ENABLED status into the outer context.
  self.assertEqual(ag_ctx.control_status_ctx().status, prev_status)
@test_util.disable_tfrt('b/170435618')
def testCancelBeforeFunctionExecution(self):
  """Cancelling before the call starts makes the call raise immediately."""
  if not context.executing_eagerly():
    self.skipTest('eager only')

  # Dequeue on an empty queue would block forever if not cancelled.
  q = data_flow_ops.FIFOQueue(1, dtypes.int32)

  @polymorphic_function.function
  def f():
    return q.dequeue()

  c_mgr = cancellation.CancellationManager()
  cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())

  c_mgr.start_cancel()
  with self.assertRaises(errors.CancelledError):
    cancelable_func()

@test_util.disable_tfrt('b/170435618')
def testCancelBlockedFunctionExecution(self):
  """Cancellation unblocks a call that is already waiting on the queue."""
  if not context.executing_eagerly():
    self.skipTest('eager only')

  q = data_flow_ops.FIFOQueue(1, dtypes.int32)

  @polymorphic_function.function
  def f():
    return q.dequeue()

  c_mgr = cancellation.CancellationManager()
  cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())

  def cancel_thread():
    # Give the main thread time to block inside the dequeue first.
    time.sleep(0.5)
    c_mgr.start_cancel()

  t = self.checkedThread(cancel_thread)
  t.start()
  with self.assertRaises(errors.CancelledError):
    cancelable_func()
  t.join()

@test_util.disable_tfrt('b/170435618')
def testCancelAfterFunctionExecution(self):
  """Cancelling after the call has already finished does nothing."""
  if not context.executing_eagerly():
    self.skipTest('eager only')

  q = data_flow_ops.FIFOQueue(1, dtypes.int32)
  q.enqueue(37)

  @polymorphic_function.function
  def f():
    return q.dequeue()

  c_mgr = cancellation.CancellationManager()
  cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())

  self.assertAllEqual(37, cancelable_func().numpy())

  # Cancellation after the function executes is a no-op.
  c_mgr.start_cancel()
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedTensorInputs(self):
  """Concrete functions accept dict/tuple nests via several calling styles."""

  @polymorphic_function.function
  def f(x, y):
    return (x['a'] + x['b'], y[0] + y[1])

  a = constant_op.constant(1000)
  b = constant_op.constant(200)
  c = constant_op.constant(30)
  d = {'a': a, 'b': b}
  e = (c, 4)

  # Test different argument signatures when constructing the concrete func.
  for cf in [
      f.get_concrete_function(d, e),
      f.get_concrete_function(d, y=e),
      f.get_concrete_function(y=e, x=d),
      f.get_concrete_function(_spec_for_value(d), _spec_for_value(e)),
      f.get_concrete_function(_spec_for_value(d), y=_spec_for_value(e)),
      f.get_concrete_function(y=_spec_for_value(e), x=_spec_for_value(d))
  ]:
    # Test different calling conventions when calling the concrete func.
    for output in [
        cf(d, e),  # structured signature
        cf(d, y=e),  # structured signature w/ kwarg
        cf(y=e, x=d),  # structured signature w/ 2 kwargs
        cf(a, b, c),  # flat signature
    ]:
      self.assertIsInstance(output, tuple)
      self.assertLen(output, 2)
      self.assertAllEqual(output[0], 1200)
      self.assertAllEqual(output[1], 34)

@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedNonTensorInputs(self):
  """Non-tensor leaves inside a nest are baked into the concrete function."""

  @polymorphic_function.function
  def f(x, y):
    return (x['a'] + x['b'], y[0] + y[1])

  a = {'a': constant_op.constant(1000), 'b': constant_op.constant(200)}
  b = (50, 3)

  for cf in [  # argument y is bound to non-Tensor value (50, 3).
      f.get_concrete_function(a, b),
      f.get_concrete_function(a, y=b),
      f.get_concrete_function(x=a, y=b)
  ]:
    for output in [cf(a, b), cf(x=a, y=b)]:
      self.assertAllEqual(output[0] + output[1], 1253)

@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNonTensorStringInputs(self):
  """A plain Python string argument is bound as a constant."""

  @polymorphic_function.function
  def f(x, y):
    return string_ops.string_join([x, y])

  a = constant_op.constant('a')
  b = 'b'

  cf = f.get_concrete_function(a, b)
  # y is bound, so it may be omitted (or re-passed with the same value).
  for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
    self.assertAllEqual(output, b'ab')
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithBoundNestedNonTensorInputs(self):
  """A fully non-tensor nest argument is bound into the concrete function."""

  @polymorphic_function.function
  def f(x, y):
    return (x['a'] + x['b'], y[0] + y[1])

  a = {'a': 3000, 'b': 200, 'c': 9000}
  b = (constant_op.constant(30), 4)

  for cf in [  # argument x is bound to non-tensor value `a`
      f.get_concrete_function(a, b),
      f.get_concrete_function(a, y=b),
      f.get_concrete_function(x=a, y=b)
  ]:
    for output in [cf(a, b), cf(a, y=b), cf(x=a, y=b)]:
      self.assertAllEqual(output[0] + output[1], 3234)

@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithAllBoundNestedNonTensorInputs(self):
  """With every argument bound, the concrete function can be called with none."""

  @polymorphic_function.function
  def f(x, y):
    return (x['a'] + x['b'], y[0] + y[1])

  a = {'a': 5000, 'b': 500}
  b = (50, 5)

  cf = f.get_concrete_function(a, b)
  for output in [cf(), cf(a, b), cf(x=a, y=b)]:
    self.assertAllEqual(output[0] + output[1], 5555)

@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionMethodWithVarargs(self):
  """An input_signature can describe a method's *args parameters."""
  float32_scalar = tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32)

  class MyModel(module.Module):

    @polymorphic_function.function(
        input_signature=[float32_scalar, float32_scalar])
    def add(self, *arg):
      return math_ops.add(*arg)

  m = MyModel()
  cf = m.add.get_concrete_function()
  cf(-12.0, 3.0)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureKeywordOrder(self):
  # Check that keyword-only arguments are sorted appropriately, so that they
  # feed the right tensor into each input.
  @polymorphic_function.function
  def g(**kwargs):
    # Joins sorted 'key=value' pairs into one comma-separated string, so the
    # output reveals which tensor ended up bound to which keyword.
    return string_ops.reduce_join(
        string_ops.reduce_join(
            ops.convert_to_tensor(sorted(kwargs.items())),
            axis=1,
            separator='='),
        axis=0,
        separator=', ')

  s = constant_op.constant('s')
  g.get_concrete_function(q=s, a=s, p=s, r=s, v=s, m=s, l=s)
  # The same keyword/value mapping must be produced no matter the order the
  # keywords are passed in.
  self.assertAllEqual(
      g(m='a', r='b', v='c', q='d', l='e', a='f', p='g'),
      b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
  self.assertAllEqual(
      g(q='d', a='f', p='g', r='b', v='c', m='a', l='e'),
      b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
  self.assertAllEqual(
      g(a='f', l='e', m='a', p='g', q='d', r='b', v='c'),
      b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
def testSameConcreteFunctionDifferentKwargOrder(self):
  """Keyword order must not trigger a retrace: both calls hit one trace."""

  @polymorphic_function.function
  def foo(**kwargs):
    return kwargs['a'] + math_ops.cast(kwargs['b'], dtypes.float32)

  float_arg = constant_op.constant(1.0)
  int_arg = constant_op.constant(1)
  # Same arguments, two different keyword orders.
  foo(a=float_arg, b=int_arg)
  foo(b=int_arg, a=float_arg)
  self.assertLen(total_function_cache(foo), 1)
def testEmptyInputSignatures(self):
  """An empty input_signature is normalized to an empty tuple, even when the
  Python function has defaulted parameters."""

  class Foo:

    @polymorphic_function.function(input_signature=[])
    def bar_none(self):
      return 1

    @polymorphic_function.function(input_signature=[])
    def bar_one(self, x=0):
      return x

    @polymorphic_function.function(input_signature=[])
    def bar_two(self, x=0, y=1):
      return x + y

  foo = Foo()
  self.assertEqual(foo.bar_none.input_signature, ())
  self.assertEqual(foo.bar_one.input_signature, ())
  self.assertEqual(foo.bar_two.input_signature, ())
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
    dict(
        testcase_name='MissingArg',
        conc_args=lambda: (1, constant_op.constant(2)),
        call_args=lambda: (1,),
        error=r'missing a required argument: \'y\'',
    ),
    dict(
        testcase_name='MissingVararg',
        conc_args=lambda: (1, 2, constant_op.constant(1.0)),
        call_args=lambda: (1, 2),
        error=r'missing a required argument: \'varargs_0\'',
    ),
    dict(
        testcase_name='ExtraPositionalArg',
        conc_args=lambda: (1, 2),
        call_args=lambda: (1, 2, 3),
        error=r'too many positional arguments',
    ),
    dict(
        testcase_name='MissingKeywordOnlyArg',
        conc_args=lambda: (1, 2),
        conc_kwargs=lambda: {'c': constant_op.constant(1.0)},
        call_args=lambda: (1, 2),
        error=r'missing a required( keyword-only)? argument: \'c\'',
    ),
    dict(
        testcase_name='ExtraKeywordArg',
        conc_args=lambda: (1, 2),
        call_args=lambda: (1, 2),
        call_kwargs=lambda: {'c': constant_op.constant(1.0)},
        error=r'got an unexpected keyword argument',
    ),
    dict(
        testcase_name='ExpectedRaggedGotNest',
        conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
        call_args=lambda: ({'a': constant_op.constant([1, 2, 3])}, 5),
        error=(
            r'Binding inputs .* failed .* don\'t have the same nested'
            r' structure'
        ),
    ),
    dict(
        testcase_name='WrongRaggedRank',
        conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
        call_args=lambda: (ragged_factory_ops.constant([[[1]]]), 5),
        error=(
            r'Binding inputs .* failed .* don\'t have the same nested'
            r' structure'
        ),
    ),
    dict(
        testcase_name='WrongRaggedDType',
        conc_args=lambda: (ragged_factory_ops.constant([[1]]),),
        call_args=lambda: (ragged_factory_ops.constant([[1.0]]), 5),
        error=(
            r'Binding inputs .* failed.*dtype=tf.float32.* to'
            r' .*dtype=tf.int32.*'
        ),
    ),
    dict(
        testcase_name='ExpectedDictGotTensor',
        conc_args=lambda: (
            {'a': constant_op.constant(1), 'b': constant_op.constant(1)},
        ),
        call_args=lambda: (constant_op.constant(1), 5),
        error=r'Binding inputs .* failed .*Can not cast .*Tensor.* to a Dict',
    ),
    dict(
        testcase_name='ExpectedTupleGotTensor',
        conc_args=lambda: (
            (constant_op.constant(1), constant_op.constant(2)),
        ),
        call_args=lambda: (constant_op.constant(1), 5),
        error=r'Binding inputs .* failed .*Can not cast .*Tensor.* to tuple',
    ),
    dict(
        testcase_name='WrongDType',
        conc_args=lambda: (constant_op.constant(1),),
        call_args=lambda: (constant_op.constant(1.0), 5),
        exception=(
            TypeError,
            errors.InvalidArgumentError,
            # on xla_gpu, we get InternalError instead.
            errors.InternalError,
        ),
    ),
    dict(
        testcase_name='ExpectedIntGotDifferentInt',
        conc_args=lambda: (5,),
        call_args=lambda: (8, 5),
        error=r'Binding inputs .* failed .*Can not cast 8 to .*5',
    ),
    dict(
        testcase_name='ExpectedIntGotTensor',
        conc_args=lambda: (5,),
        call_args=lambda: (constant_op.constant(6), 5),
        error=r'Binding inputs .* failed .*Can not cast .*Tensor.* to .*5',
    ),
    dict(
        testcase_name='TwoValuesForArgument',
        conc_args=lambda: (1, 2),
        call_args=lambda: (1, 2),
        call_kwargs=lambda: {'x': 3},
        error=r'got an unexpected keyword argument \'x\'',
    ),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureError(self,
                                                 conc_args=(),
                                                 conc_kwargs=None,
                                                 call_args=(),
                                                 call_kwargs=None,
                                                 error='.*',
                                                 exception=TypeError):
  """Tests for errors in the structured signature.

  Args:
    conc_args: Positional arguments used for get_concrete_function.
    conc_kwargs: Keyword arguments used for get_concrete_function.
    call_args: Positional arguments used to call the function.
    call_kwargs: Keyword arguments used to call the function.
    error: Expected exception message.
    exception: Expected exception type.
  """
  # Arguments are wrapped in lambdas above so tensors are only created once
  # the test body runs (inside the right graph/eager context).
  conc_args = conc_args() if callable(conc_args) else conc_args
  conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
  call_args = call_args() if callable(call_args) else call_args
  call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
  self.assertIsInstance(conc_args, tuple)
  self.assertIsInstance(call_args, tuple)
  self.assertIsInstance(conc_kwargs, dict)
  self.assertIsInstance(call_kwargs, dict)

  @polymorphic_function.function
  def func(x, y=5, *varargs, **kwargs):  # pylint: disable=keyword-arg-before-vararg
    del y, varargs, kwargs
    return x

  conc = func.get_concrete_function(*conc_args, **conc_kwargs)
  with self.assertRaisesRegex(exception, error):
    self.evaluate(conc(*call_args, **call_kwargs))
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
    dict(
        testcase_name='MissingArg',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        call_args=lambda: (constant_op.constant(1),),
        error=r'func\(x, y\) missing required arguments: y'),
    dict(
        testcase_name='TwoValuesForArg',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        call_args=lambda: (constant_op.constant(1),),
        call_kwargs=lambda: {
            'x': constant_op.constant(1),
            'y': constant_op.constant(1)
        },
        error=r"func\(x, y\) got two values for 'x'"),
    dict(
        testcase_name='ExtraPositionalArg',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        call_args=lambda: (constant_op.constant(1), constant_op.constant(2),
                           constant_op.constant(3)),
        error=r'func\(x, y\) takes 2 .* got 3'),
    dict(
        testcase_name='UnexpectedKeywordArg',
        conc_args=lambda: (constant_op.constant(1),),
        call_args=lambda: (constant_op.constant(1),),
        call_kwargs=lambda: {'c': constant_op.constant(1)},
        error=r'func\(x\) got unexpected keyword arguments: c'),
    dict(
        testcase_name='MissingVararg',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2),
                           constant_op.constant(3)),
        call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        error=r'func\(x, y, varargs_0\) missing required '
        r'arguments: varargs_0'),
    dict(
        testcase_name='MissingKeywordArg',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        conc_kwargs=lambda: {'c': constant_op.constant(1)},
        call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        error=r'func\(x, y, c\) missing required arguments: c'),
    dict(
        testcase_name='ExpectedTensorGotInt',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        call_args=lambda: (5, constant_op.constant(2)),
        error=r'func\(x, y\): expected argument #0\(zero-based\) to be '
        r'a Tensor; got int \(5\)'),
    dict(
        testcase_name='WrongDType',
        conc_args=lambda: (constant_op.constant(1),),
        call_args=lambda: (constant_op.constant(1.0),),
        exception=(
            ValueError,
            errors.InvalidArgumentError,
            # on xla_gpu, we get InternalError instead.
            errors.InternalError)),
    dict(
        testcase_name='MissingKeywordArgNestPiece',
        conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        conc_kwargs=lambda: {'c': ragged_factory_ops.constant([[1]])},
        call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
        call_kwargs=lambda: {'c': constant_op.constant(1)},
        error=r'func\(x, y, c, c_1\) missing required arguments: c_1'),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionFlatSignatureError(self,
                                           conc_args=(),
                                           conc_kwargs=None,
                                           call_args=(),
                                           call_kwargs=None,
                                           error='.*',
                                           exception=TypeError):
  """Tests for errors in the flat signature.

  Args:
    conc_args: Positional arguments used for get_concrete_function.
    conc_kwargs: Keyword arguments used for get_concrete_function.
    call_args: Positional arguments used to call the function.
    call_kwargs: Keyword arguments used to call the function.
    error: Expected exception message.
    exception: Expected exception type.
  """
  # Arguments are wrapped in lambdas above so tensors are only created once
  # the test body runs (inside the right graph/eager context).
  conc_args = conc_args() if callable(conc_args) else conc_args
  conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
  call_args = call_args() if callable(call_args) else call_args
  call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
  self.assertIsInstance(conc_args, tuple)
  self.assertIsInstance(call_args, tuple)
  self.assertIsInstance(conc_kwargs, dict)
  self.assertIsInstance(call_kwargs, dict)

  @polymorphic_function.function
  def func(x, y=5, *varargs, **kwargs):  # pylint: disable=keyword-arg-before-vararg
    del y, varargs, kwargs
    return x

  conc = func.get_concrete_function(*conc_args, **conc_kwargs)
  with self.assertRaisesRegex(exception, error):
    self.evaluate(conc._call_with_flat_signature(call_args, call_kwargs))  # pylint: disable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionAmbiguousSignature(self):
  # When both the flat & structured signatures are applicable, but they
  # give different results, we use the structured signature.  Note: we expect
  # this to be extremely rare.
  @polymorphic_function.function
  def f(x, y):
    return x * 10 + y

  # The TensorSpec names are deliberately swapped relative to the parameter
  # names, so the flat signature would bind the arguments the other way.
  conc = f.get_concrete_function(
      x=tensor_lib.TensorSpec(None, dtypes.int32, name='y'),
      y=tensor_lib.TensorSpec(None, dtypes.int32, name='x'))

  result = conc(x=constant_op.constant(5), y=constant_op.constant(6))
  # 56 = 5 * 10 + 6: structured binding won (flat binding would give 65).
  self.assertAllEqual(result, 56)
def testPrettyPrintedSignature(self):
  """Checks both the terse and verbose pretty-printed signature formats."""

  @polymorphic_function.function
  def func(x, kangaroo=None, octopus=7):
    del octopus, kangaroo
    return x

  scalar = constant_op.constant(5)
  vector = constant_op.constant([10, 10, 20])

  concrete_fn = func.get_concrete_function(scalar, vector)
  # The unused default octopus=7 is captured as Literal[7].
  summary = (
      '(x: TensorSpec(shape=(), dtype=tf.int32, name=None), kangaroo:'
      ' TensorSpec(shape=(3,), dtype=tf.int32, name=None), octopus:'
      ' Literal[7]) -> TensorSpec(shape=(), dtype=tf.int32, name=None)'
  )
  details = (
      'Input Parameters:\n'
      + '  x (POSITIONAL_OR_KEYWORD): TensorSpec(shape=(),'
      ' dtype=tf.int32, name=None)\n'
      + '  kangaroo (POSITIONAL_OR_KEYWORD):'
      ' TensorSpec(shape=(3,), dtype=tf.int32, name=None)\n'
      + '  octopus (POSITIONAL_OR_KEYWORD): Literal[7]\n'
      + 'Output Type:\n'
      + '  TensorSpec(shape=(), dtype=tf.int32, name=None)\n'
      + 'Captures:\n'
      + '  None'
  )
  self.assertEqual(
      concrete_fn.pretty_printed_signature(verbose=False), summary
  )
  self.assertEqual(
      concrete_fn.pretty_printed_signature(verbose=True), details
  )
  self.assertRegex(repr(concrete_fn), r'<ConcreteFunction .* at .*')
  self.assertEqual(str(concrete_fn), 'ConcreteFunction {}'.format(details))

def testPrettyPrintedExplicitSignatureWithKeywordArg(self):
  """A defaulted keyword not covered by input_signature prints as a Literal."""

  @polymorphic_function.function(
      input_signature=[tensor_lib.TensorSpec(None)])
  def fn(a, b=1):
    return a + b

  concrete_fn = fn.get_concrete_function()
  self.assertEqual(
      concrete_fn.pretty_printed_signature(False),
      '(a: TensorSpec(shape=<unknown>, dtype=tf.float32, name=None), b:'
      ' Literal[1]) -> TensorSpec(shape=<unknown>, dtype=tf.float32,'
      ' name=None)',
  )
  self.assertEqual(
      concrete_fn.pretty_printed_signature(True),
      'Input Parameters:\n'
      + '  a (POSITIONAL_OR_KEYWORD):'
      ' TensorSpec(shape=<unknown>, dtype=tf.float32, name=None)\n'
      + '  b (POSITIONAL_OR_KEYWORD): Literal[1]\n'
      + 'Output Type:\n'
      + '  TensorSpec(shape=<unknown>, dtype=tf.float32, name=None)\n'
      + 'Captures:\n'
      + '  None',
  )
def testPrettyPrintedSignatureLoadedNamedTuple(self):
  """Namedtuple args are flattened to keyword-only tensors after SavedModel load.

  Saves a concrete function taking two `Point` namedtuples, reloads it, and
  checks that the loaded serving signature pretty-prints each namedtuple
  field as a separate KEYWORD_ONLY TensorSpec (a, a_1, b, b_1).
  """
  # Local import: path handling only needed by this test.
  import os

  Point = collections.namedtuple('Point', ['x', 'y'])

  @polymorphic_function.function
  def fn(b, a):  # pylint: disable=unused-argument
    return 1.

  b = Point(
      x=constant_op.constant(1., dtype=dtypes.float32),
      y=constant_op.constant(1., dtype=dtypes.float32))
  a = Point(
      x=constant_op.constant(1, dtype=dtypes.int32),
      y=constant_op.constant(1, dtype=dtypes.int32))

  mod = module.Module()
  f = fn.get_concrete_function(b, a)
  # Use the test's private temp dir instead of the hard-coded '/tmp/f':
  # a fixed path is not portable and collides between parallel test runs.
  export_dir = os.path.join(self.get_temp_dir(), 'f')
  save(mod, export_dir, signatures=f)
  loaded = load(export_dir)

  printed = loaded.signatures['serving_default'].pretty_printed_signature()
  self.assertEqual(
      printed,
      'Input Parameters:\n'
      + "  a (KEYWORD_ONLY): TensorSpec(shape=(), dtype=tf.int32, name='a')\n"
      + '  a_1 (KEYWORD_ONLY): TensorSpec(shape=(),'
      " dtype=tf.int32, name='a_1')\n"
      + '  b (KEYWORD_ONLY): TensorSpec(shape=(),'
      " dtype=tf.float32, name='b')\n"
      + '  b_1 (KEYWORD_ONLY):'
      " TensorSpec(shape=(), dtype=tf.float32, name='b_1')\n"
      + 'Output Type:\n'
      + "  Dict[['output_0', TensorSpec(shape=(), dtype=tf.float32,"
      " name='output_0')]]\n"
      + 'Captures:\n'
      + '  None',
  )
@test_util.run_in_graph_and_eager_modes
def testIndexedSlicesAsGradientsForConcreteFunctions(self):
  """Gradients flowing through gather (IndexedSlices) must not error."""

  @polymorphic_function.function
  def summing_rnn(inputs):
    return math_ops.reduce_sum(inputs, axis=1)

  @polymorphic_function.function
  def gradients(inputs):
    with backprop.GradientTape() as tape:
      tape.watch(inputs)
      hidden = summing_rnn(inputs)
      # gather makes the incoming gradient an IndexedSlices.
      hidden = array_ops.gather(hidden, constant_op.constant([0]))
      loss = math_ops.reduce_mean(hidden)
    return tape.gradient(loss, inputs)

  gradients(constant_op.constant([[[1.0], [2.0]]]))  # No error is raised
def testWithExtraWrapper(self):
  """tf.function composes with another decorator on a method."""

  class Foo(module.Module):

    def __init__(self):
      super().__init__()
      self.var = None

    @polymorphic_function.function
    @dummy_tf_decorator
    def add(self, x, y, z=1):
      if self.var is None:
        return x + y + z

  foo = Foo()
  self.assertEqual(foo.add(2, 3).numpy(), 6)

@parameterized.parameters([
    (polymorphic_function.function, dummy_tf_decorator),
    (dummy_tf_decorator, polymorphic_function.function),
    (polymorphic_function.function, polymorphic_function.function)
])
def testWithExtraWrapperRedundantArgs(self, decorator1, decorator2):
  """Duplicate argument errors surface through stacked decorators."""

  class Foo(module.Module):

    def __init__(self):
      super().__init__()
      self.var = None

    @decorator1
    @decorator2
    def add1(self, x, y):
      if self.var is None:
        return x + y

  foo = Foo()
  with self.assertRaisesRegex(TypeError, 'multiple values for argument'):
    # x is passed both positionally and by keyword.
    foo.add1(2, x=3)  # pylint: disable=redundant-keyword-arg,no-value-for-parameter

def testWithExtraWrapperMissingArgs(self):
  """Missing-argument errors surface through stacked decorators."""

  class Foo(module.Module):

    def __init__(self):
      super().__init__()
      self.var = None

    @polymorphic_function.function
    @dummy_tf_decorator
    def add1(self, x, y):
      if self.var is None:
        return x + y

    @polymorphic_function.function
    @dummy_tf_decorator
    def add2(self, x, y):
      if self.var is None:
        return x + y

    @polymorphic_function.function
    @polymorphic_function.function
    def add3(self, x, y):
      if self.var is None:
        return x + y

  foo = Foo()
  with self.assertRaisesRegex(TypeError,
                              'missing a required argument: \'y\''):
    foo.add1(2)  # pylint: disable=no-value-for-parameter

  with self.assertRaisesRegex(TypeError,
                              'missing a required argument: \'x\''):
    foo.add1(y=2)  # pylint: disable=no-value-for-parameter

  with self.assertRaisesRegex(TypeError,
                              'missing a required argument: \'y\''):
    foo.add2(2)  # pylint: disable=no-value-for-parameter

  with self.assertRaisesRegex(TypeError,
                              'missing a required argument: \'x\''):
    foo.add2(y=2)  # pylint: disable=no-value-for-parameter

  with self.assertRaisesRegex(TypeError,
                              'missing a required argument: \'y\''):
    foo.add3(2)  # pylint: disable=no-value-for-parameter

  with self.assertRaisesRegex(TypeError,
                              'missing a required argument: \'x\''):
    foo.add3(y=2)  # pylint: disable=no-value-for-parameter
def testMissingArgsTfFunctionedMethod(self):
  """Missing-argument errors for tf.function-wrapped bound methods."""

  class A:

    def func(self, position_arg1, position_arg2):
      return position_arg1, position_arg2

    @polymorphic_function.function
    def decorated_method(self, position_arg1, position_arg2):
      return position_arg1, position_arg2

  a_instance = A()
  tf_method_pos = polymorphic_function.function(a_instance.func)
  with self.assertRaisesRegex(TypeError, 'missing a required argument'):
    tf_method_pos(position_arg2='foo')

  # tf.function-decorated instance methods need to be tested because of
  # the __get__ method implementation.
  tf_func_decorated_method = polymorphic_function.function(
      a_instance.decorated_method)
  tf_func_decorated_method(position_arg1='foo', position_arg2='bar')
  with self.assertRaisesRegex(TypeError, 'missing a required argument'):
    tf_func_decorated_method(position_arg2='bar')

def testMissingArgsTfFunctionedObject(self):
  """Missing-argument errors for a tf.function-wrapped callable object."""

  class A:

    def __call__(self, position_arg1, position_arg2):
      return position_arg1, position_arg2

  a_instance = A()
  # A tf.function-decorated callable object needs to be tested because of
  # the special inspect results.
  tf_func_obj = polymorphic_function.function(a_instance)
  tf_func_obj(position_arg1=1, position_arg2=2)
  with self.assertRaisesRegex(TypeError, 'missing a required argument'):
    tf_func_obj(position_arg2='bar')

def testMissingArgsTfFunctionedFunctions(self):
  """Missing-argument errors for plain tf.function-wrapped functions."""

  def func_pos(position_arg1, position_arg2):
    return position_arg1, position_arg2

  def func_with_default(position_arg, named_arg=None):
    return position_arg, named_arg

  def func_pos_3args(position_arg1, position_arg2, position_arg3):
    return position_arg1, position_arg2, position_arg3

  tf_func_pos = polymorphic_function.function(func_pos)
  with self.assertRaisesRegex(
      TypeError, 'missing a required argument'):
    tf_func_pos(position_arg2='foo')

  tf_func_with_default = polymorphic_function.function(func_with_default)
  tf_func_with_default(position_arg='bar')
  with self.assertRaisesRegex(TypeError, 'missing a required argument'):
    tf_func_with_default(named_arg='foo')

  tf_func_pos_3args = polymorphic_function.function(func_pos_3args)
  with self.assertRaisesRegex(TypeError, 'missing a required argument'):
    tf_func_pos_3args(position_arg2='foo')
def testShapeInferencePropagateConstNestedStack(self):
  """A constant passed into a nested fn propagates through stack into shapes."""

  @polymorphic_function.function(input_signature=[
      tensor_lib.TensorSpec((None, None), dtype=dtypes.int32),
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
  ])
  def f(x, s):
    old_shape = array_ops.shape(x)
    new_shape = array_ops_stack.stack([old_shape[0], s], axis=0)
    y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
    return y

  @polymorphic_function.function(input_signature=[
      tensor_lib.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
  ])
  def g(x):
    y = f(x, s=5)
    # s=5 is a Python constant, so the static shape must already be known.
    assert y.shape.as_list() == [3, 5], y.shape.as_list()
    return y

  self.assertAllEqual(
      g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))

def testShapeInferencePropagateConstNestedUnstackStack(self):
  """Constant propagation survives an unstack/stack round trip on the shape."""

  @polymorphic_function.function(input_signature=[
      tensor_lib.TensorSpec((None, None), dtype=dtypes.int32),
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
  ])
  def f(x, s):
    s0, _ = array_ops_stack.unstack(array_ops.shape(x), axis=0)
    new_shape = array_ops_stack.stack([s0, s], axis=0)
    y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
    return y

  @polymorphic_function.function(input_signature=[
      tensor_lib.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
  ])
  def g(x):
    y = f(x, s=5)
    assert y.shape.as_list() == [3, 5], y.shape.as_list()
    return y

  self.assertAllEqual(
      g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))

def testShapeInferencePropagateConstNestedConcat(self):
  """Constants concatenated into a shape vector keep their static values."""

  @polymorphic_function.function(input_signature=[
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
  ])
  def f(d1, d2, d3):
    new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
    y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
    return y

  @polymorphic_function.function()
  def g():
    y = f(1, 2, 3)
    assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
    return y

  self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))

def testShapeInferencePropagateConstDoubleNested(self):
  """Same as the concat case, but through an extra tf.function wrapper."""

  @polymorphic_function.function(input_signature=[
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
      tensor_lib.TensorSpec((), dtype=dtypes.int32),
  ])
  def f(d1, d2, d3):
    new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
    y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
    return y

  @polymorphic_function.function()
  def g():
    y = polymorphic_function.function(f)(1, 2, 3)
    assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
    return y

  self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
@test_util.run_v2_only
def testControlDependencyAfterInline(self):
  """Inlining must preserve the ordering between assign() and assign_add()."""
  v = variables.Variable(0.)

  @polymorphic_function.function
  def assign():
    return v.assign(1.)

  @polymorphic_function.function
  def assign_add():
    return v.assign_add(1.)

  @polymorphic_function.function
  def f():
    check_ops.assert_equal_v2(assign(), 1.)
    check_ops.assert_equal_v2(assign_add(), 2.)

  # We don't have a way to inspect the inlined graph in Python, so we run it
  # multiple times to have more confidence the dependency is correct.
  for _ in range(30):
    f()

@test_util.run_v2_only
def testReadInFuncWriteOutside(self):
  """A nested read must complete before the outer write is applied."""
  # Run many times since we are testing for a potential race condition.
  for _ in range(30):
    # pylint: disable=cell-var-from-loop
    v = variables.Variable(1.)

    @polymorphic_function.function
    def add_one():
      return v + 1.

    @polymorphic_function.function
    def get_v_plus_one():
      v_plus_one = add_one()
      v.assign_add(2.0)
      return v_plus_one

    # Must observe the pre-assign value (1 + 1), never 3 + 1.
    self.assertAllEqual(get_v_plus_one(), 2.0)
def testOpExpandErrorMessage(self):
  """Runtime op failures report the enclosing tf.function in the message."""

  @polymorphic_function.function
  def test_fn():
    if array_ops.constant(False):
      return array_ops.constant(1)
    else:
      # Declares int32 output but produces float32 -> runtime error.
      return script_ops.eager_py_func(
          func=lambda: array_ops.constant([2.]), inp=(), Tout=dtypes.int32)

  error_pattern = re.compile(r'Graph execution error.*test_fn', re.DOTALL)
  with self.assertRaisesRegex(errors.InvalidArgumentError, error_pattern):
    test_fn()
def testNoVariables(self):
  """A stateless tf.function simply computes and returns its result."""

  @polymorphic_function.function
  def fn(x):
    return 2 * x

  doubled = fn(constant_op.constant(4.0))
  self.assertAllEqual(doubled, 8.0)
def testFailIfVariablesAreCreatedMoreThanOnce(self):
  """Creating a fresh variable on every trace/call is rejected."""

  @polymorphic_function.function
  def fn(x):
    # Unconditionally creates a new Variable each invocation.
    return variables.Variable(1.0) + x

  with self.assertRaises(ValueError):
    fn(1.0)
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
  """Repeated variable creation fails even when a strong reference is kept."""
  state = []

  @polymorphic_function.function
  def fn(x):
    # Appending keeps each created variable alive (no weakref collection),
    # but creating one per call is still an error.
    state.append(variables.Variable(1.0))
    return state[-1] + x

  with self.assertRaises(ValueError):
    fn(1.0)
def testRange(self):
  """A Python `range` object is accepted as a tf.function argument."""

  @polymorphic_function.function
  def f(unused_x):
    return 1.0

  result = f(range(5))
  self.assertAllEqual(result, 1.0)
def testCorrectVariableCreation(self):
  """A variable created only on the first call is reused afterwards."""
  state = []

  @polymorphic_function.function
  def fn(x):
    if not state:
      state.append(variables.Variable(2.0))
    return state[0] * x

  self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
  self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)

def testFunctionInitializer(self):
  """A callable initializer works for a variable created inside a function."""
  state = []

  @polymorphic_function.function
  def fn(x):
    if not state:
      state.append(variables.Variable(lambda: 2.0))
    return state[0] * x

  self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)

def testFunctionMultipleVariableInitializer(self):
  """Multiple lazily-initialized variables can be created in one function."""
  state = []

  @polymorphic_function.function
  def fn(x):
    if not state:
      state.append(variables.Variable(lambda: 2.0))
      state.append(variables.Variable(lambda: 5.0))
    return state[0] * x, state[1] * x

  self.assertAllEqual(fn(constant_op.constant(1.0)), [2.0, 5.0])

def testFunctionInitializationFunction(self):
  """get_initialization_function defers variable init until explicitly run."""
  state = []

  @polymorphic_function.function
  def fn(x):
    if not state:
      state.append(variables.Variable(2.0))
    return state[0] * x

  init_fn = fn.get_initialization_function(constant_op.constant(1.0))
  self.assertLen(state, 1)
  # The variable exists but has not been initialized yet.
  self.assertFalse(
      resource_variable_ops.var_is_initialized_op(state[0].handle))
  init_fn()
  self.assertEqual(state[0].numpy(), 2.0)

def testVariableInitializerNotConstant(self):
  """A variable initializer may depend on the function's input tensor."""
  state = []

  @polymorphic_function.function
  def fn(x):
    if not state:
      state.append(variables.Variable(2.0 * x))
    return state[0] * x

  self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
  self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
  """In graph mode the created variable joins the global initializer."""
  with ops.Graph().as_default(), self.test_session() as sess:
    state = []

    @polymorphic_function.function
    def fn(x):
      if not state:
        state.append(variables.Variable(2.0))
      return state[0] * x

    result = fn(3.0)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual(sess.run(state[0]), 2.0)
    self.assertAllEqual(self.evaluate(result), 6.0)

def testLegacyGraphModeVariablesNonTrivialInitializer(self):
  """A multi-op initializer expression is lifted out for graph-mode init."""
  with ops.Graph().as_default(), self.test_session() as sess:
    state = []

    @polymorphic_function.function
    def fn(x):
      if not state:
        two = constant_op.constant(2.0)
        four = two * two
        two_again = math_ops.sqrt(four)
        # Initializer value: sqrt(4) + 4 == 6.
        state.append(variables.Variable(two_again + four))
      return state[0] * x

    result = fn(3.0)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual(sess.run(state[0]), 6.0)
    self.assertAllEqual(self.evaluate(result), 18.0)

def testLegacyGraphModeInputDependentInitializerFails(self):
  """An initializer depending on a function input can't be lifted in graph mode."""
  with ops.Graph().as_default():
    state = []

    @polymorphic_function.function
    def fn(x):
      if not state:
        # Depends on the placeholder x, so it cannot be lifted out.
        state.append(variables.Variable(2.0 * x))
      return state[0] * x

    with self.assertRaisesRegex(lift_to_graph.UnliftableError,
                                r'transitively.* mul .* x'):
      fn(constant_op.constant(3.0))
def testMethod(self):
class MyModel:
def __init__(self):
self.var = None
@polymorphic_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def testMethodExtensionType(self):
class MaskedTensorExtensionType(extension_type.ExtensionType):
values: tensor_lib.Tensor
mask: tensor_lib.Tensor
@polymorphic_function.function
def with_default(self, default_value):
return array_ops.where_v2(self.mask, self.values, default_value)
@polymorphic_function.function
def sum(self):
# Use a loop & conditional to test that autograph works correctly.
result = 0
for i in range(array_ops.size(self.values)):
if self.mask[i]:
result += self.values[i]
return result
mt = MaskedTensorExtensionType([1, 2, 3], [True, False, True])
self.assertAllEqual(mt.with_default(-1), [1, -1, 3])
self.assertAllEqual(mt.sum(), 4)
def test_functools_partial(self):
self.assertAllClose(
3.,
polymorphic_function.function(
functools.partial(lambda x, y: x + y,
1.))(constant_op.constant(2.)))
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
func = polymorphic_function.function(functools.partial(f, y=6))
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
func = polymorphic_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
func = polymorphic_function.function(
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
def test_complicated_partial_with_defaults(self):
def identity(*args):
return args
def dynamic_unroll(core_fn,
input_sequence,
initial_state,
sequence_length=None,
parallel_iterations=1,
swap_memory=False):
del core_fn
self.assertIs(None, sequence_length)
self.assertEqual(1, parallel_iterations)
self.assertTrue(swap_memory)
return input_sequence, initial_state
input_sequence = random_ops.random_uniform([1, 1, 1])
initial_state = random_ops.random_uniform([1, 1])
func = polymorphic_function.function(
functools.partial(dynamic_unroll, identity, swap_memory=True))
func(input_sequence, initial_state)
def test_unspecified_default_argument(self):
wrapped = polymorphic_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_lib.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_concrete_function_from_signature(self):
@polymorphic_function.function(
input_signature=[tensor_lib.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_lib.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
signature_args, _ = concrete.structured_input_signature
self.assertEqual(signature_args,
(tensor_lib.TensorSpec(
None, dtypes.float32, name='x'),))
def testInputSignatureMissingTensorSpecsMethod(self):
class MyModule(module.Module):
def f1(self, arg1, arg2, arg3):
pass
def f2(self, arg1, arg2, arg3, **kwargs):
pass
def f3(self, arg1, arg2, arg3, arg4=4, **kwargs):
pass
def f4(self, arg1, arg2, arg3, *args):
pass
def f5(self, arg1, arg2, arg3, *args, **kwargs):
pass
def f6(self, arg1, arg4=4, **kwargs):
return arg1 + arg4
m = MyModule()
tf_func_dec = polymorphic_function.function(
input_signature=(tensor_lib.TensorSpec([], dtypes.int32),))
error_message = 'input_signature missing type constraint'
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(m.f1)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(m.f2)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(m.f3)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(m.f4)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(m.f5)(1, 2, 3)
self.assertEqual(tf_func_dec(m.f6)(1).numpy(), 5)
def testInputSignatureMissingTensorSpecsFunction(self):
tf_func_dec = polymorphic_function.function(
input_signature=(tensor_lib.TensorSpec([], dtypes.int32),))
error_message = 'input_signature missing type constraint'
# pylint: disable=unused-argument
def f1(arg1, arg2, arg3):
pass
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(f1)(1, 2, 3)
def f2(arg1, arg2, arg3, **kwargs):
pass
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(f2)(1, 2, 3)
def f3(arg1, arg2, arg3, arg4=4, **kwargs):
pass
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(f3)(1, 2, 3)
def f4(arg1, arg2, arg3, *args):
pass
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(f4)(1, 2, 3)
def f5(arg1, arg2, arg3, *args, **kwargs):
pass
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(f5)(1, 2, 3)
# pyline: enable=unused-argument
def f6(arg1, arg4=4, **kwargs):
return arg1 + arg4
self.assertEqual(tf_func_dec(f6)(1).numpy(), 5)
def testInputSignatureMissingTensorSpecsLambdaFunction(self):
tf_func_dec = polymorphic_function.function(
input_signature=(tensor_lib.TensorSpec([], dtypes.int32),))
error_message = 'input_signature missing type constraint'
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(lambda ar1, arg2, arg3: None)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(lambda arg1, arg2, arg3, **kwargs: None)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(lambda arg1, arg2, arg3, arg4=4, **kwargs: None)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(lambda arg1, arg2, arg3, *args: None)(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(lambda arg1, arg2, arg3, *args, **kwargs: None)(1, 2, 3)
self.assertEqual(
tf_func_dec(lambda arg1, arg4=4, **kwargs: arg1 + arg4)(1).numpy(), 5)
@parameterized.named_parameters(('_method', 'method'),
('_function', 'function'),
('_lambda_function', 'lambda_function'))
def testInputSignaturePartialFuncMissingTensorSpecs(self, func_type):
if func_type == 'method':
class MyModule(module.Module):
def f(self, arg1, arg2, arg3, arg4=4):
return arg1 + arg2 + arg3 + arg4
f = MyModule().f
elif func_type == 'function':
def f(arg1, arg2, arg3, arg4=4):
return arg1 + arg2 + arg3 + arg4
else: # lambda_function
f = lambda arg1, arg2, arg3, arg4=4: arg1 + arg2 + arg3 + arg4
error_message = 'input_signature missing type constraint'
tf_func_dec = polymorphic_function.function(
input_signature=(tensor_lib.TensorSpec([], dtypes.int32),)
)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(functools.partial(f, 1))(2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(functools.partial(f, arg4=5))(1, 2, 3)
with self.assertRaisesRegex(TypeError, error_message):
tf_func_dec(functools.partial(f, 1, arg4=5))(2, 3)
self.assertAllEqual(
tf_func_dec(functools.partial(f, 1, 2, arg4=5))(3),
array_ops.constant(11),
)
@test_util.run_in_graph_and_eager_modes
def test_variable_naming(self):
class HasVars(module.Module):
def __init__(self):
self.x = None
self.y = None
self.z = None
@polymorphic_function.function
def make_x(self):
if self.x is None:
self.x = variables.Variable(1., name='v')
def make_y(self):
if self.y is None:
self.y = variables.Variable(1., name='v')
def make_z(self):
if self.z is None:
with ops.name_scope('z_scope', skip_on_eager=False):
self.z = variables.Variable(1., name='z')
root = HasVars()
root.make_x()
root.make_y()
root.make_z()
self.assertEqual('v:0', root.x.name)
self.assertEqual('z_scope/z:0', root.z.name)
def test_concrete_function_keyword_arguments(self):
@polymorphic_function.function
def f(x):
return x
conc = f.get_concrete_function(
tensor_lib.TensorSpec(None, dtypes.float32, 'y'))
conc(y=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
# If name is not specified, the previously named one will be returned.
conc = f.get_concrete_function(tensor_lib.TensorSpec(None, dtypes.float32))
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
# New name will return updated signature.
conc = f.get_concrete_function(
tensor_lib.TensorSpec(None, dtypes.float32, 'z')
)
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0].name)
@polymorphic_function.function
def g(x):
return x[0]
conc = g.get_concrete_function(
[tensor_lib.TensorSpec(None, dtypes.float32, 'z'), 2])
conc(z=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0][0].name)
def testRuntimeErrorNotSticky(self):
@polymorphic_function.function
def fail(i):
control_flow_assert.Assert(math_ops.equal(i, 0), ['ick'])
fail(constant_op.constant(0)) # OK
with self.assertRaises(errors.InvalidArgumentError):
fail(constant_op.constant(1)) # InvalidArgument: "ick"
fail(constant_op.constant(0)) # OK
def testUnderscoreName(self):
@polymorphic_function.function
def f(_):
return _ + _
self.assertAllEqual(2.0, f(constant_op.constant(1.0)))
def test_serialization_signature_cache(self):
@polymorphic_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
signatures_args = set()
concrete_functions = f._list_all_concrete_functions_for_serialization()
for concrete_function in concrete_functions:
args, kwargs = concrete_function.structured_input_signature
signatures_args.add(args)
self.assertEqual(dict(), kwargs)
self.assertEqual(
signatures_args,
set(((tensor_lib.TensorSpec([1, 2], dtypes.float32, name='x'),
tensor_lib.TensorSpec([1], dtypes.float32, name='y')),
(tensor_lib.TensorSpec([1, 3], dtypes.int32, name='x'),
tensor_lib.TensorSpec([1], dtypes.int32, name='y')))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = polymorphic_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
has_decorated_method = _HasDecoratedMethod()
has_decorated_method.f(constant_op.constant(5.))
weak_fn = weakref.ref(has_decorated_method.f)
del has_decorated_method
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testErrorMessageWhenGraphTensorIsPassedToEager(self):
@polymorphic_function.function
def failing_function():
a = constant_op.constant(1.)
with ops.init_scope():
_ = a + a
with self.assertRaisesRegex(
TypeError,
re.compile('polymorphic_function_test.*out of scope', re.DOTALL)):
failing_function()
def testSymbolicTensorIllegalCaptureCallTimeError(self):
x = None
@polymorphic_function.function
def f1(a):
nonlocal x
x = a
return a
@polymorphic_function.function
def f2(b):
return b + x
f1(constant_op.constant(1))
with self.assertRaisesRegex(
TypeError,
re.compile('polymorphic_function_test.*out of scope', re.DOTALL)):
f2(constant_op.constant(2))
def testSymbolicTensorIllegalCaptureTraceTimeError(self):
@polymorphic_function.function
def f(inputs):
num_steps, _ = inputs.shape[:2]
outputs = []
for t in math_ops.range(num_steps):
outputs.append(inputs[t])
return outputs
with self.assertRaisesRegex(errors.InaccessibleTensorError, 'out of scope'):
f(array_ops.zeros(shape=(8, 42, 3)))
def testNonUniqueNamesGetConcreteFunction(self):
@polymorphic_function.function
def non_unique_arg_names(x, **kwargs):
a, b, c = x
d = kwargs['d']
return a + b + c + d
concrete = non_unique_arg_names.get_concrete_function(
(tensor_lib.TensorSpec(None, dtypes.float32),
tensor_lib.TensorSpec(None, dtypes.float32),
tensor_lib.TensorSpec(None, dtypes.float32)),
d=tensor_lib.TensorSpec(None, dtypes.float32))
self.assertAllClose(
10.,
concrete(x=constant_op.constant(1.),
x_1=constant_op.constant(2.),
x_2=constant_op.constant(3.),
d=constant_op.constant(4.)))
self.assertAllClose(
10.,
concrete(constant_op.constant(1.),
constant_op.constant(2.),
constant_op.constant(3.),
constant_op.constant(4.)))
def testDuplicatedSanitizedNames(self):
@polymorphic_function.function
def foo(**kwargs):
return kwargs['a_b'] + kwargs['a/b']
error_message = 'Name collision after sanitization.'
with self.assertRaisesRegex(ValueError, error_message):
foo(**{'a_b': 1, 'a/b': 2})
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@polymorphic_function.function
def f():
if not created_variables:
created_variables.append(variables.Variable(1.))
return created_variables[0] + 1.
def capture_creator(next_creator, **kwargs):
created = next_creator(**kwargs)
captured_variables.append(created)
return created
with variable_scope.variable_creator_scope(capture_creator):
f()
self.assertEqual(created_variables, captured_variables)
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@polymorphic_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@polymorphic_function.function
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 2.0)
def testSameVariableTwiceWithReducedRetracing(self):
v = variables.Variable(2.0)
@polymorphic_function.function(reduce_retracing=True)
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 4.0)
def testVariableUpdate(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(2.0)
v3 = variables.Variable(4, dtype=dtypes.int32)
trace_count = [0]
@polymorphic_function.function
def double_variable(x):
trace_count[0] += 1
x.assign_add(x.read_value())
self.assertEqual(trace_count[0], 0)
double_variable(v1)
self.assertEqual(trace_count[0], 1)
self.assertEqual(self.evaluate(v1), 2.0)
double_variable(v2)
# No retracing because v2's data type and shape are the same as v1
self.assertEqual(trace_count[0], 1)
self.assertEqual(self.evaluate(v2), 4.0)
double_variable(v3)
# Retracing because of data type change
self.assertEqual(trace_count[0], 2)
self.assertEqual(self.evaluate(v3), 8)
def testShapeCache(self):
@polymorphic_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(
tensor_lib.TensorSpec([None], dtypes.int32))
func_b = func.get_concrete_function(
tensor_lib.TensorSpec([None], dtypes.int32))
self.assertIs(func_a, func_b)
def testCacheWithinSaveContext(self):
@polymorphic_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(constant_op.constant(2.))
func_b = func.get_concrete_function(constant_op.constant(2.))
self.assertIs(func_a, func_b)
with save_context.save_context(
save_options.SaveOptions(experimental_variable_policy=save_options
.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES)):
func_c = func.get_concrete_function(constant_op.constant(2.))
with save_context.save_context(
save_options.SaveOptions(
experimental_variable_policy=save_options.VariablePolicy.NONE)):
func_d = func.get_concrete_function(constant_op.constant(2.))
self.assertIsNot(func_a, func_c)
self.assertIsNot(func_a, func_d)
def testInitializationInNestedCall(self):
v_holder = []
@polymorphic_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
@polymorphic_function.function
def wrapper(x):
return add_var(x)
self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@polymorphic_function.function()
def create_variable():
with ops.init_scope():
initial_value = random_ops.random_uniform(
(2, 2), maxval=1000000, dtype=dtypes.int64)
if not a:
with ops.device('CPU:0'):
a.append(resource_variable_ops.ResourceVariable(initial_value))
return a[0].read_value()
create_variable()
self.assertRegex(a[0].device, 'CPU')
@test_util.run_gpu_only
def testDeviceAnnotationForInitializerRespected(self):
a = []
initial_value = []
def initial_value_fn():
initial_value.append(random_ops.random_uniform((2, 3)))
return initial_value[0]
@polymorphic_function.function()
def create_variable():
with ops.init_scope():
if not a:
a.append(variables.Variable(initial_value_fn))
with ops.device('CPU:0'):
create_variable()
self.assertRegex(a[0].device, 'CPU')
self.assertRegex(initial_value[0].device, 'CPU')
def testDecorate(self):
func = polymorphic_function.function(lambda: 1)
def decorator(f):
return lambda: 1 + f()
func._decorate(decorator)
self.assertEqual(func().numpy(), 2)
@parameterized.parameters(*itertools.product(
(None, (tensor_lib.TensorSpec([]),)), # input_signature
(True, False), # autograph
(None, converter.Feature.ALL), # autograph_options
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
(True, False), # compile
(True, False), # override_function
))
def testClone(self, input_signature, autograph, autograph_options, implements,
relax_shapes, compile_, override_function):
original_py_function = lambda x: x
compile_ = False
func = polymorphic_function.function(
func=original_py_function,
input_signature=input_signature,
autograph=autograph,
experimental_implements=implements,
experimental_autograph_options=autograph_options,
reduce_retracing=relax_shapes,
jit_compile=compile_)
if override_function:
cloned_py_function = lambda x: x + 1
else:
cloned_py_function = original_py_function
cloned = func._clone(python_function=cloned_py_function)
self.assertEqual(cloned_py_function, cloned._python_function)
self.assertEqual(func._name, cloned._name)
self.assertEqual(input_signature, cloned.input_signature)
self.assertEqual(autograph, cloned._autograph)
self.assertEqual(func._attributes, cloned._attributes)
self.assertEqual(autograph_options, cloned._experimental_autograph_options)
self.assertEqual(relax_shapes, cloned._reduce_retracing)
self.assertEqual(compile_, cloned._jit_compile)
# This test does not run with XLA JIT support linked in so we can only check
# the output of the function if compile is disabled.
if not compile_:
x = array_ops.zeros([])
self.assertEqual(self.evaluate(cloned(x)),
self.evaluate(cloned_py_function(x)))
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@polymorphic_function.function
def use_variable():
if not var_list:
initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)
v = variables.Variable(initial_value)
var_list.append(v)
return var_list[0] + 1.
var_plus_one = use_variable()
with self.session() as session:
init_op = var_list[0].initializer
session.run(init_op, feed_dict={init_op.inputs[1]: 2.})
self.assertEqual(3., session.run(var_plus_one))
def testDecorate_rejectedAfterTrace(self):
func = polymorphic_function.function(lambda: 1)
self.assertEqual(func().numpy(), 1)
msg = 'Functions cannot be decorated after they have been traced.'
with self.assertRaisesRegex(ValueError, msg):
func._decorate(lambda f: f)
def testGetConcreteFunctionGraphLifetime(self):
@polymorphic_function.function
def func():
pass
graph = func.get_concrete_function().graph
del func
# If the graph is deleted, then an exception is raised on reading `captures`
self.assertEmpty(graph.captures)
@parameterized.parameters(*itertools.product(
(None, (tensor_lib.TensorSpec([]),)), # input_signature
(True, False), # autograph
(None, converter.Feature.ALL), # autograph_options
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
))
def test_pickle(self, input_signature, autograph, autograph_options,
implements, relax_shapes):
"""@function objects can be pickled and unpickled."""
original_py_function = undecorated_function
func = polymorphic_function.function(
func=original_py_function,
input_signature=input_signature,
autograph=autograph,
experimental_implements=implements,
experimental_autograph_options=autograph_options,
reduce_retracing=relax_shapes,
)
cloned = pickle.loads(pickle.dumps(func))
self.assertEqual(func._name, cloned._name)
self.assertEqual(input_signature, cloned.input_signature)
self.assertEqual(autograph, cloned._autograph)
self.assertEqual(func._attributes, cloned._attributes)
self.assertEqual(autograph_options, cloned._experimental_autograph_options)
self.assertEqual(relax_shapes, cloned._reduce_retracing)
x = array_ops.ones([])
self.assertEqual(self.evaluate(cloned(x)), self.evaluate(func(x)))
def test_frequent_retracing_warning(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@polymorphic_function.function
def f(x):
return x
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
self.assertEmpty(logs.output)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_lambda(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
f = polymorphic_function.function(lambda x: x)
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_method(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
class Foo:
@polymorphic_function.function
def f(self, x):
return x
f = Foo().f
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_two_independent_tf_functions(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@polymorphic_function.function
def f(x):
return x
@polymorphic_function.function
def g(x):
return x
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
g(1)
g(2)
g(3)
g(4)
g(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_nested(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@polymorphic_function.function
def inner(x):
return x + 1
@polymorphic_function.function
def outer1(x):
return inner(x) * 2
@polymorphic_function.function
def outer2(x):
return inner(x) * 3
with self.assertLogs(level='WARN') as logs:
inner(1)
inner(2)
inner(3)
inner(4)
outer1(5)
outer1(6)
outer1(7)
outer1(8)
outer2(9)
outer2(10)
outer2(11)
outer2(12)
self.assertEmpty(logs.output)
outer2(13)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_on_reinstantiation(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
with self.assertLogs(level='WARN') as logs:
for i in range(5):
@polymorphic_function.function
def f(x):
return x
f(i)
if i < 4:
self.assertEmpty(logs.output)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_restored_function_retracing_warning(self):
class Foo(Checkpoint):
@polymorphic_function.function
def __call__(self, x):
return x
f_flexible = Foo()
_ = f_flexible.__call__.get_concrete_function(
tensor_lib.TensorSpec(shape=[None], dtype=dtypes.int32))
tmp_dir = self.create_tempdir()
save(f_flexible, tmp_dir.full_path)
restored_f_flexible = load(tmp_dir.full_path)
f_fixed_shape = Foo()
with self.assertLogs(level='WARN') as logs:
restored_f_flexible(constant_op.constant([1], dtypes.int32))
restored_f_flexible(constant_op.constant([1, 2], dtypes.int32))
restored_f_flexible(constant_op.constant([1, 2, 3], dtypes.int32))
restored_f_flexible(constant_op.constant([1, 2, 3, 4], dtypes.int32))
restored_f_flexible(constant_op.constant([1, 2, 3, 4, 5], dtypes.int32))
self.assertEmpty(logs.output)
f_fixed_shape(constant_op.constant([1], dtypes.int32))
f_fixed_shape(constant_op.constant([1, 2], dtypes.int32))
f_fixed_shape(constant_op.constant([1, 2, 3], dtypes.int32))
f_fixed_shape(constant_op.constant([1, 2, 3, 4], dtypes.int32))
f_fixed_shape(constant_op.constant([1, 2, 3, 4, 5], dtypes.int32))
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_retracing_warning_limits(self):
@polymorphic_function.function
def my_func(x):
return x
with self.assertLogs(level='WARN') as logs:
for i in range(10):
my_func(i)
self.assertLen(logs.output, 2)
def test_experimental_get_tracing_count_function(self):
@polymorphic_function.function
def double(a):
return a + a
double(constant_op.constant(1))
double(constant_op.constant(2))
self.assertAllEqual(double.experimental_get_tracing_count(), 1)
double(constant_op.constant('a'))
self.assertAllEqual(double.experimental_get_tracing_count(), 2)
def test_experimental_get_tracing_count_method(self):
class TestClass():
@polymorphic_function.function
def testDouble(self, a):
return a + a
obj1 = TestClass()
obj1.testDouble(constant_op.constant(1))
obj1.testDouble(constant_op.constant(2))
obj1.testDouble(constant_op.constant(1.1))
self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2)
obj2 = TestClass()
obj2.testDouble(constant_op.constant(1))
obj2.testDouble(constant_op.constant(1.1))
obj2.testDouble(constant_op.constant('a'))
self.assertAllEqual(obj2.testDouble.experimental_get_tracing_count(), 3)
self.assertAllEqual(obj1.testDouble.experimental_get_tracing_count(), 2)
def test_tensor_shape_casted_to_specific(self):
@polymorphic_function.function(
input_signature=[tensor_lib.TensorSpec([1])]
)
def specific(x):
self.assertEqual(x.shape, [1])
return x
@polymorphic_function.function(
input_signature=[tensor_lib.TensorSpec(None)]
)
def general(x):
return specific(x)
self.assertEqual(general(constant_op.constant([1.0])).numpy(), 1.0)
def test_recursive_tf_function(self):
@polymorphic_function.function
def recursive_fn(n):
if n > 0:
return recursive_fn(n - 1)
return 1
self.assertEqual(recursive_fn(5).numpy(), 1)
def test_recursive_tf_function_with_gradients(self):
@polymorphic_function.function
def recursive_fn(n, x):
if n > 0:
return n * recursive_fn(n - 1, x)
else:
return x
x = variables.Variable(1.0)
with backprop.GradientTape() as tape:
g = recursive_fn(5, x)
dg_dx = tape.gradient(g, x)
self.assertEqual(dg_dx.numpy(), 120)
def test_recursive_python_function(self):
def recursive_py_fn(n):
if n > 0:
return recursive_py_fn(n - 1)
return 1
@polymorphic_function.function
def recursive_fn(n):
return recursive_py_fn(n)
self.assertEqual(recursive_fn(5).numpy(), 1)
def test_recursive_python_function_with_gradients(self):
def recursive_py_fn(n, x):
if n > 0:
return n * recursive_py_fn(n - 1, x)
return x
@polymorphic_function.function
def recursive_fn(n, x):
return recursive_py_fn(n, x)
x = variables.Variable(1.0)
with backprop.GradientTape() as tape:
g = recursive_fn(5, x)
dg_dx = tape.gradient(g, x)
self.assertEqual(dg_dx.numpy(), 120)
def test_recursive_tf_function_call_each_other(self):
@polymorphic_function.function
def recursive_fn1(n):
if n <= 1:
return 1
return recursive_fn2(n - 1)
@polymorphic_function.function
def recursive_fn2(n):
if n <= 1:
return 2
return recursive_fn1(n - 1)
self.assertEqual(recursive_fn1(5).numpy(), 1)
self.assertEqual(recursive_fn1(6).numpy(), 2)
self.assertEqual(recursive_fn2(5).numpy(), 2)
self.assertEqual(recursive_fn2(6).numpy(), 1)
def test_recursive_tf_function_call_each_other_with_gradients(self):
@polymorphic_function.function
def recursive_fn1(n, x):
if n <= 1:
return x
return n * recursive_fn2(n - 1, x)
@polymorphic_function.function
def recursive_fn2(n, x):
if n <= 1:
return 2 * x
return n * recursive_fn1(n - 1, x)
x = variables.Variable(1.0)
with backprop.GradientTape() as tape:
g1 = recursive_fn1(5, x)
dg1_dx = tape.gradient(g1, x)
self.assertEqual(dg1_dx.numpy(), 120)
with backprop.GradientTape() as tape:
g2 = recursive_fn2(5, x)
dg2_dx = tape.gradient(g2, x)
self.assertEqual(dg2_dx.numpy(), 240)
def test_recursive_tf_function_with_cond(self):
@polymorphic_function.function(autograph=False)
def recursive_fn(n):
return cond_v2.cond_v2(n > 0, recursive_fn(n - 1), 1)
with self.assertRaises(RecursionError):
recursive_fn(constant_op.constant(5))
| FunctionTest |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 67066,
"end": 67657
} | class ____(MemoryPointer):
"""Modifies the ownership semantic of the MemoryPointer so that the
instance lifetime is directly tied to the number of references.
When the reference count reaches zero, the finalizer is invoked.
Constructor arguments are the same as for :class:`MemoryPointer`.
"""
def __init__(self, *args, **kwargs):
super(AutoFreePointer, self).__init__(*args, **kwargs)
# Releease the self reference to the buffer, so that the finalizer
# is invoked if all the derived pointers are gone.
self.refct -= 1
| AutoFreePointer |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numerictypes.py | {
"start": 4355,
"end": 4638
} | class ____(TestCase):
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_("int64" in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_("int64" in np.longlong.__doc__)
@instantiate_parametrized_tests
| TestDocStrings |
python | viewflow__viewflow | tests/fsm/test_fsm__basics.py | {
"start": 153,
"end": 425
} | class ____(TextChoices): # noqa:D100
NEW = "NEW", _("New")
APPROVED = "APPROVED", _("Approved")
REJECTED = "REJECTED", _("Rejected")
PUBLISHED = "PUBLISHED", _("Published")
HIDDEN = "HIDDEN", _("Hidden")
REMOVED = "REMOVED", _("Removed")
| ReviewState |
python | fluentpython__example-code | 15-context-mngr/mirror.py | {
"start": 1582,
"end": 2189
} | class ____:
def __enter__(self): # <1>
import sys
self.original_write = sys.stdout.write # <2>
sys.stdout.write = self.reverse_write # <3>
return 'JABBERWOCKY' # <4>
def reverse_write(self, text): # <5>
self.original_write(text[::-1])
def __exit__(self, exc_type, exc_value, traceback): # <6>
import sys # <7>
sys.stdout.write = self.original_write # <8>
if exc_type is ZeroDivisionError: # <9>
print('Please DO NOT divide by zero!')
return True # <10>
# <11>
# END MIRROR_EX
| LookingGlass |
python | ray-project__ray | python/ray/air/tests/test_new_dataset_config.py | {
"start": 6291,
"end": 12553
class ____(DataParallelTrainer):
    """Trainer whose workers check per-epoch ordering of the "train" shard.

    Each worker iterates the shard twice; with ``expect_random=True`` the
    two passes must differ (per-epoch randomization), otherwise they must
    be identical (materialized/frozen data).
    """

    def __init__(self, num_workers: int, expect_random: bool, **kwargs):
        def train_loop_per_worker():
            data_shard = train.get_dataset_shard("train")
            assert isinstance(data_shard, DataIterator), data_shard
            # Consume the shard twice to compare row order across epochs.
            epoch1 = list(data_shard.iter_rows())
            epoch2 = list(data_shard.iter_rows())
            print("Epochs", epoch1, "\n", epoch2)
            if expect_random:
                assert epoch1 != epoch2
            else:
                assert epoch1 == epoch2

        # Drop any caller-supplied scaling_config; num_workers wins.
        kwargs.pop("scaling_config", None)
        super().__init__(
            train_loop_per_worker=train_loop_per_worker,
            scaling_config=ScalingConfig(num_workers=num_workers),
            **kwargs,
        )
def test_per_epoch_preprocessing(ray_start_4_cpus):
    """Randomized/shuffled/random-mapped datasets must differ across epochs."""
    ds = ray.data.range(100, override_num_blocks=100).randomize_block_order()
    test = TestRandom(2, True, datasets={"train": ds})
    test.fit()

    ds = ray.data.range(100, override_num_blocks=100).random_shuffle()
    test = TestRandom(2, True, datasets={"train": ds})
    test.fit()

    # map() with a random transform should also re-execute per epoch.
    ds = ray.data.range(100, override_num_blocks=100).map(
        lambda x: {"id": x["id"] * random.random()}
    )
    test = TestRandom(2, True, datasets={"train": ds})
    test.fit()
def test_materialized_preprocessing(ray_start_4_cpus):
    """After materialize(), randomness is frozen: epochs must be identical."""
    # TODO(ekl) we should test all these configs with splitting enabled, but this
    # requires implementing deterministic streaming split.
    ds = ray.data.range(100, override_num_blocks=100).randomize_block_order()
    ds = ds.materialize()
    test = TestRandom(
        2,
        False,
        datasets={"train": ds},
        dataset_config=DataConfig(datasets_to_split=[]),
    )
    test.fit()

    ds = ray.data.range(100, override_num_blocks=100).random_shuffle()
    ds = ds.materialize()
    test = TestRandom(
        2,
        False,
        datasets={"train": ds},
        dataset_config=DataConfig(datasets_to_split=[]),
    )
    test.fit()

    ds = ray.data.range(100, override_num_blocks=100).map(
        lambda x: {"id": x["id"] * random.random()}
    )
    ds = ds.materialize()
    test = TestRandom(
        2,
        False,
        datasets={"train": ds},
        dataset_config=DataConfig(datasets_to_split=[]),
    )
    test.fit()
def _run_data_config_resource_test(data_config):
    """Shared driver asserting how DataConfig propagates resource limits.

    Starts a cluster, runs a 2-worker GPU trainer, and checks *inside the
    worker loop* that the execution options Data ends up with are
    consistent with ``data_config`` (default limits -> Train resources
    excluded; explicit limits -> limits preserved, nothing excluded).
    """
    cluster_cpus, cluster_gpus = 20, 10
    num_workers = 2
    # Resources used by training workers.
    cpus_per_worker, gpus_per_worker = 2, 1
    # Resources used by the trainer actor.
    default_trainer_cpus, default_trainer_gpus = 1, 0
    num_train_cpus = num_workers * cpus_per_worker + default_trainer_cpus
    num_train_gpus = num_workers * gpus_per_worker + default_trainer_gpus

    original_execution_options = data_config._get_execution_options("train")

    ray.init(num_cpus=cluster_cpus, num_gpus=cluster_gpus)

    class MyTrainer(DataParallelTrainer):
        def __init__(self, **kwargs):
            def train_loop_fn():
                train_ds = train.get_dataset_shard("train")
                new_execution_options = train_ds._base_dataset.context.execution_options
                if original_execution_options.is_resource_limits_default():
                    # If the original resource limits are default, the new resource
                    # limits should be the default as well.
                    # And the new exclude_resources should be the resources used by
                    # Train + user-defined exclude_resources.
                    assert new_execution_options.is_resource_limits_default()
                    exclude_resources = new_execution_options.exclude_resources
                    assert (
                        exclude_resources.cpu
                        == num_train_cpus
                        + original_execution_options.exclude_resources.cpu
                    )
                    assert (
                        exclude_resources.gpu
                        == num_train_gpus
                        + original_execution_options.exclude_resources.gpu
                    )
                else:
                    # If the original resource limits are not default, the new resource
                    # limits should be the same as the original ones.
                    # And the new exclude_resources should be zero.
                    assert (
                        new_execution_options.resource_limits
                        == original_execution_options.resource_limits
                    )
                    assert (
                        new_execution_options.exclude_resources
                        == ExecutionResources.zero()
                    )

            # The scaling config below is authoritative for this test.
            kwargs.pop("scaling_config", None)
            super().__init__(
                train_loop_per_worker=train_loop_fn,
                scaling_config=ScalingConfig(
                    num_workers=num_workers,
                    use_gpu=True,
                    resources_per_worker={
                        "CPU": cpus_per_worker,
                        "GPU": gpus_per_worker,
                    },
                ),
                datasets={"train": ray.data.range(10)},
                dataset_config=data_config,
                **kwargs,
            )

    trainer = MyTrainer()
    trainer.fit()
def test_data_config_default_resource_limits(shutdown_only):
    """Test that DataConfig should exclude training resources from Data."""
    execution_options = ExecutionOptions()
    # User-defined exclusions on top of the default (unset) limits.
    execution_options.exclude_resources = execution_options.exclude_resources.copy(
        cpu=2, gpu=1
    )
    data_config = DataConfig(execution_options=execution_options)
    _run_data_config_resource_test(data_config)
def test_data_config_manual_resource_limits(shutdown_only):
    """Test manually setting resource limits in DataConfig."""
    execution_options = ExecutionOptions()
    # Explicit limits must be preserved verbatim by the trainer.
    execution_options.resource_limits = execution_options.resource_limits.copy(
        cpu=10, gpu=5
    )
    data_config = DataConfig(execution_options=execution_options)
    _run_data_config_resource_test(data_config)
if __name__ == "__main__":
    import sys

    import pytest

    # -v: verbose test names; -x: stop at the first failure.
    sys.exit(pytest.main(["-v", "-x", __file__]))
| TestRandom |
python | astropy__astropy | astropy/table/soco.py | {
"start": 434,
"end": 1677
class ____:
    """An ordered ``(key, value)`` pair used as a sorted-container entry.

    Two ``Node`` instances compare on their ``(key, value)`` tuples; a
    ``Node`` compared against any other object compares its ``key``
    against that object directly.  Instances are deliberately unhashable
    (``__hash__ = None``).
    """

    __slots__ = ("key", "value")

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def _comparands(self, other):
        # Select the left/right operands for a rich comparison: full
        # (key, value) tuples for Node-vs-Node, the bare key otherwise.
        if other.__class__ is Node:
            return (self.key, self.value), (other.key, other.value)
        return self.key, other

    def __lt__(self, other):
        lhs, rhs = self._comparands(other)
        return lhs < rhs

    def __le__(self, other):
        lhs, rhs = self._comparands(other)
        return lhs <= rhs

    def __eq__(self, other):
        lhs, rhs = self._comparands(other)
        return lhs == rhs

    def __ne__(self, other):
        lhs, rhs = self._comparands(other)
        return lhs != rhs

    def __gt__(self, other):
        lhs, rhs = self._comparands(other)
        return lhs > rhs

    def __ge__(self, other):
        lhs, rhs = self._comparands(other)
        return lhs >= rhs

    __hash__ = None

    def __repr__(self):
        return f"Node({self.key!r}, {self.value!r})"
| Node |
python | jazzband__django-model-utils | tests/test_fields/test_split_field.py | {
"start": 131,
"end": 2522
class ____(TestCase):
    """Tests for the split text field exposed as ``Article.body``."""

    # Body containing the split marker, and the excerpt it should yield.
    full_text = 'summary\n\n<!-- split -->\n\nmore'
    excerpt = 'summary\n'

    def setUp(self) -> None:
        self.post = Article.objects.create(
            title='example post', body=self.full_text)

    def test_unicode_content(self) -> None:
        self.assertEqual(str(self.post.body), self.full_text)

    def test_excerpt(self) -> None:
        self.assertEqual(self.post.body.excerpt, self.excerpt)

    def test_content(self) -> None:
        self.assertEqual(self.post.body.content, self.full_text)

    def test_has_more(self) -> None:
        self.assertTrue(self.post.body.has_more)

    def test_not_has_more(self) -> None:
        # No split marker in the body -> excerpt is not a truncation.
        post = Article.objects.create(title='example 2',
                                      body='some text\n\nsome more\n')
        self.assertFalse(post.body.has_more)

    def test_load_back(self) -> None:
        post = Article.objects.get(pk=self.post.pk)
        self.assertEqual(post.body.content, self.post.body.content)
        self.assertEqual(post.body.excerpt, self.post.body.excerpt)

    def test_assign_to_body(self) -> None:
        new_text = 'different\n\n<!-- split -->\n\nother'
        self.post.body = new_text
        self.post.save()
        self.assertEqual(str(self.post.body), new_text)

    def test_assign_to_content(self) -> None:
        new_text = 'different\n\n<!-- split -->\n\nother'
        self.post.body.content = new_text
        self.post.save()
        self.assertEqual(str(self.post.body), new_text)

    def test_assign_to_excerpt(self) -> None:
        # The excerpt is derived from content and must not be writable.
        with self.assertRaises(AttributeError):
            self.post.body.excerpt = 'this should fail'  # type: ignore[misc]

    def test_access_via_class(self) -> None:
        # The descriptor cannot be read off the model class itself.
        with self.assertRaises(AttributeError):
            Article.body

    def test_assign_splittext(self) -> None:
        a = Article(title='Some Title')
        a.body = self.post.body
        self.assertEqual(a.body.excerpt, 'summary\n')

    def test_value_to_string(self) -> None:
        f = self.post._meta.get_field('body')
        self.assertEqual(f.value_to_string(self.post), self.full_text)

    def test_abstract_inheritance(self) -> None:
        # The abstract parent contributes the content + excerpt columns.
        class Child(SplitFieldAbstractParent):
            pass

        self.assertEqual(
            [f.name for f in Child._meta.fields],
            ["id", "content", "_content_excerpt"])
| SplitFieldTests |
python | sympy__sympy | sympy/functions/special/delta_functions.py | {
"start": 675,
"end": 12241
class ____(DefinedFunction):
    r"""
    The DiracDelta function and its derivatives.

    Explanation
    ===========

    DiracDelta is not an ordinary function. It can be rigorously defined either
    as a distribution or as a measure.

    DiracDelta only makes sense in definite integrals, and in particular,
    integrals of the form ``Integral(f(x)*DiracDelta(x - x0), (x, a, b))``,
    where it equals ``f(x0)`` if ``a <= x0 <= b`` and ``0`` otherwise. Formally,
    DiracDelta acts in some ways like a function that is ``0`` everywhere except
    at ``0``, but in many ways it also does not. It can often be useful to treat
    DiracDelta in formal ways, building up and manipulating expressions with
    delta functions (which may eventually be integrated), but care must be taken
    to not treat it as a real function. SymPy's ``oo`` is similar. It only
    truly makes sense formally in certain contexts (such as integration limits),
    but SymPy allows its use everywhere, and it tries to be consistent with
    operations on it (like ``1/oo``), but it is easy to get into trouble and get
    wrong results if ``oo`` is treated too much like a number. Similarly, if
    DiracDelta is treated too much like a function, it is easy to get wrong or
    nonsensical results.

    DiracDelta function has the following properties:

    1) $\frac{d}{d x} \theta(x) = \delta(x)$
    2) $\int_{-\infty}^\infty \delta(x - a)f(x)\, dx = f(a)$ and $\int_{a-
       \epsilon}^{a+\epsilon} \delta(x - a)f(x)\, dx = f(a)$
    3) $\delta(x) = 0$ for all $x \neq 0$
    4) $\delta(g(x)) = \sum_i \frac{\delta(x - x_i)}{\|g'(x_i)\|}$ where $x_i$
       are the roots of $g$
    5) $\delta(-x) = \delta(x)$

    Derivatives of ``k``-th order of DiracDelta have the following properties:

    6) $\delta(x, k) = 0$ for all $x \neq 0$
    7) $\delta(-x, k) = -\delta(x, k)$ for odd $k$
    8) $\delta(-x, k) = \delta(x, k)$ for even $k$

    Examples
    ========

    >>> from sympy import DiracDelta, diff, pi
    >>> from sympy.abc import x, y

    >>> DiracDelta(x)
    DiracDelta(x)
    >>> DiracDelta(1)
    0
    >>> DiracDelta(-1)
    0
    >>> DiracDelta(pi)
    0
    >>> DiracDelta(x - 4).subs(x, 4)
    DiracDelta(0)
    >>> diff(DiracDelta(x))
    DiracDelta(x, 1)
    >>> diff(DiracDelta(x - 1), x, 2)
    DiracDelta(x - 1, 2)
    >>> diff(DiracDelta(x**2 - 1), x, 2)
    2*(2*x**2*DiracDelta(x**2 - 1, 2) + DiracDelta(x**2 - 1, 1))
    >>> DiracDelta(3*x).is_simple(x)
    True
    >>> DiracDelta(x**2).is_simple(x)
    False
    >>> DiracDelta((x**2 - 1)*y).expand(diracdelta=True, wrt=x)
    DiracDelta(x - 1)/(2*Abs(y)) + DiracDelta(x + 1)/(2*Abs(y))

    See Also
    ========

    Heaviside
    sympy.simplify.simplify.simplify, is_simple
    sympy.functions.special.tensor_functions.KroneckerDelta

    References
    ==========

    .. [1] https://mathworld.wolfram.com/DeltaFunction.html

    """
    # eval() rejects arguments with a nonzero imaginary part, so the
    # function only ever takes (and yields) real values.
    is_real = True

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of a DiracDelta Function.

        Explanation
        ===========

        The difference between ``diff()`` and ``fdiff()`` is: ``diff()`` is the
        user-level function and ``fdiff()`` is an object method. ``fdiff()`` is
        a convenience method available in the ``Function`` class. It returns
        the derivative of the function without considering the chain rule.
        ``diff(function, x)`` calls ``Function._eval_derivative`` which in turn
        calls ``fdiff()`` internally to compute the derivative of the function.

        Examples
        ========

        >>> from sympy import DiracDelta, diff
        >>> from sympy.abc import x

        >>> DiracDelta(x).fdiff()
        DiracDelta(x, 1)

        >>> DiracDelta(x, 1).fdiff()
        DiracDelta(x, 2)

        >>> DiracDelta(x**2 - 1).fdiff()
        DiracDelta(x**2 - 1, 1)

        >>> diff(DiracDelta(x, 1)).fdiff()
        DiracDelta(x, 3)

        Parameters
        ==========

        argindex : integer
            degree of derivative

        """
        if argindex == 1:
            #I didn't know if there is a better way to handle default arguments
            k = 0
            if len(self.args) > 1:
                k = self.args[1]
            # Differentiating bumps the derivative order by one.
            return self.func(self.args[0], k + 1)
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, arg, k=S.Zero):
        """
        Returns a simplified form or a value of DiracDelta depending on the
        argument passed by the DiracDelta object.

        Explanation
        ===========

        The ``eval()`` method is automatically called when the ``DiracDelta``
        class is about to be instantiated and it returns either some simplified
        instance or the unevaluated instance depending on the argument passed.
        In other words, ``eval()`` method is not needed to be called explicitly,
        it is being called and evaluated once the object is called.

        Examples
        ========

        >>> from sympy import DiracDelta, S
        >>> from sympy.abc import x

        >>> DiracDelta(x)
        DiracDelta(x)

        >>> DiracDelta(-x, 1)
        -DiracDelta(x, 1)

        >>> DiracDelta(1)
        0

        >>> DiracDelta(5, 1)
        0

        >>> DiracDelta(0)
        DiracDelta(0)

        >>> DiracDelta(-1)
        0

        >>> DiracDelta(S.NaN)
        nan

        >>> DiracDelta(x - 100).subs(x, 5)
        0

        >>> DiracDelta(x - 100).subs(x, 100)
        DiracDelta(0)

        Parameters
        ==========

        k : integer
            order of derivative

        arg : argument passed to DiracDelta

        """
        if not k.is_Integer or k.is_negative:
            raise ValueError("Error: the second argument of DiracDelta must be \
            a non-negative integer, %s given instead." % (k,))
        if arg is S.NaN:
            return S.NaN
        if arg.is_nonzero:
            return S.Zero
        if fuzzy_not(im(arg).is_zero):
            raise ValueError(filldedent('''
                Function defined only for Real Values.
                Complex part: %s found in %s .''' % (
                repr(im(arg)), repr(arg))))
        c, nc = arg.args_cnc()
        if c and c[0] is S.NegativeOne:
            # keep this fast and simple instead of using
            # could_extract_minus_sign
            if k.is_odd:
                return -cls(-arg, k)
            elif k.is_even:
                return cls(-arg, k) if k else cls(-arg)
        elif k.is_zero:
            return cls(arg, evaluate=False)

    def _eval_expand_diracdelta(self, **hints):
        """
        Compute a simplified representation of the function using
        property number 4. Pass ``wrt`` as a hint to expand the expression
        with respect to a particular variable.

        Explanation
        ===========

        ``wrt`` is:

        - a variable with respect to which a DiracDelta expression will
          get expanded.

        Examples
        ========

        >>> from sympy import DiracDelta
        >>> from sympy.abc import x, y

        >>> DiracDelta(x*y).expand(diracdelta=True, wrt=x)
        DiracDelta(x)/Abs(y)
        >>> DiracDelta(x*y).expand(diracdelta=True, wrt=y)
        DiracDelta(y)/Abs(x)

        >>> DiracDelta(x**2 + x - 2).expand(diracdelta=True, wrt=x)
        DiracDelta(x - 1)/3 + DiracDelta(x + 2)/3

        See Also
        ========

        is_simple, DiracDelta

        """
        wrt = hints.get('wrt', None)
        if wrt is None:
            free = self.free_symbols
            if len(free) == 1:
                wrt = free.pop()
            else:
                raise TypeError(filldedent('''
            When there is more than 1 free symbol or variable in the expression,
            the 'wrt' keyword is required as a hint to expand when using the
            DiracDelta hint.'''))

        # Only undifferentiated deltas whose argument involves wrt expand.
        if not self.args[0].has(wrt) or (len(self.args) > 1 and self.args[1] != 0 ):
            return self
        try:
            argroots = roots(self.args[0], wrt)
            result = 0
            valid = True
            darg = abs(diff(self.args[0], wrt))
            for r, m in argroots.items():
                if r.is_real is not False and m == 1:
                    result += self.func(wrt - r)/darg.subs(wrt, r)
                else:
                    # don't handle non-real and if m != 1 then
                    # a polynomial will have a zero in the derivative (darg)
                    # at r
                    valid = False
                    break
            if valid:
                return result
        except PolynomialError:
            pass
        return self

    def is_simple(self, x):
        """
        Tells whether the argument(args[0]) of DiracDelta is a linear
        expression in *x*.

        Examples
        ========

        >>> from sympy import DiracDelta, cos
        >>> from sympy.abc import x, y

        >>> DiracDelta(x*y).is_simple(x)
        True
        >>> DiracDelta(x*y).is_simple(y)
        True

        >>> DiracDelta(x**2 + x - 2).is_simple(x)
        False

        >>> DiracDelta(cos(x)).is_simple(x)
        False

        Parameters
        ==========

        x : can be a symbol

        See Also
        ========

        sympy.simplify.simplify.simplify, DiracDelta

        """
        p = self.args[0].as_poly(x)
        if p:
            return p.degree() == 1
        return False

    def _eval_rewrite_as_Piecewise(self, *args, **kwargs):
        """
        Represents DiracDelta in a piecewise form.

        Examples
        ========

        >>> from sympy import DiracDelta, Piecewise, Symbol
        >>> x = Symbol('x')

        >>> DiracDelta(x).rewrite(Piecewise)
        Piecewise((DiracDelta(0), Eq(x, 0)), (0, True))

        >>> DiracDelta(x - 5).rewrite(Piecewise)
        Piecewise((DiracDelta(0), Eq(x, 5)), (0, True))

        >>> DiracDelta(x**2 - 5).rewrite(Piecewise)
        Piecewise((DiracDelta(0), Eq(x**2, 5)), (0, True))

        >>> DiracDelta(x - 5, 4).rewrite(Piecewise)
        DiracDelta(x - 5, 4)

        """
        # Derivatives (len(args) > 1) are returned unevaluated.
        if len(args) == 1:
            return Piecewise((DiracDelta(0), Eq(args[0], 0)), (0, True))

    def _eval_rewrite_as_SingularityFunction(self, *args, **kwargs):
        """
        Returns the DiracDelta expression written in the form of Singularity
        Functions.

        """
        from sympy.solvers import solve
        from sympy.functions.special.singularity_functions import SingularityFunction
        if self == DiracDelta(0):
            return SingularityFunction(0, 0, -1)
        if self == DiracDelta(0, 1):
            return SingularityFunction(0, 0, -2)
        free = self.free_symbols
        if len(free) == 1:
            x = (free.pop())
            if len(args) == 1:
                return SingularityFunction(x, solve(args[0], x)[0], -1)
            return SingularityFunction(x, solve(args[0], x)[0], -args[1] - 1)
        else:
            # I don't know how to handle the case for DiracDelta expressions
            # having arguments with more than one variable.
            raise TypeError(filldedent('''
                rewrite(SingularityFunction) does not support
                arguments with more that one variable.'''))
###############################################################################
############################## HEAVISIDE FUNCTION #############################
###############################################################################
| DiracDelta |
python | bokeh__bokeh | tests/support/plugins/file_server.py | {
"start": 2436,
"end": 5784
class ____:
    """A very basic web server serving static HTML on a background thread."""

    def __init__(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT) -> None:
        self.stop_serving = False
        # Bind to the first free port at or above ``port``.
        while True:
            try:
                self.server = HTTPServer((host, port), HtmlOnlyHandler)
                self.host = host
                self.port = port
                break
            except OSError:
                log.debug(f"port {port} is in use, trying to next one")
                port += 1

        self.thread = threading.Thread(target=self._run_web_server)

    def _run_web_server(self) -> None:
        """Runs the server loop."""
        log.debug("web server started")
        while not self.stop_serving:
            self.server.handle_request()
        self.server.server_close()

    def start(self) -> None:
        """Starts the server."""
        self.thread.start()

    def stop(self) -> None:
        """Stops the server."""
        self.stop_serving = True

        try:
            # This is to force stop the server loop: handle_request() blocks
            # until a request arrives, so we poke it with one last request.
            urlopen(f"http://{self.host}:{self.port}")
        except OSError:
            pass
        log.info("Shutting down the webserver")
        self.thread.join()

    def where_is(self, path: Path) -> str:
        """Returns the URL this server serves the given HTML_ROOT file at."""
        path = str(path.relative_to(HTML_ROOT)).replace('\\', '/')  # Windows-proof
        return f"http://{self.host}:{self.port}/{path}"
@pytest.fixture(scope='session')
def file_server(request: pytest.FixtureRequest) -> SimpleWebServer:
    """Session-scoped fixture serving HTML_ROOT files over local HTTP."""
    server = SimpleWebServer()
    server.start()
    # Stop the background server thread when the test session ends.
    request.addfinalizer(server.stop)
    return server
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_html_root_error_message = f"Can't find 'common_web' directory, try setting WEBDRIVER environment variable WEBDRIVER: {WEBDRIVER} HTML_ROOT: {HTML_ROOT}"

# Fail fast at import time when the static test assets are missing.
# NOTE(review): ``assert`` is stripped under ``python -O``; the log line
# above is the only signal in that case.
if not os.path.isdir(HTML_ROOT):
    log.error(_html_root_error_message)
    assert 0, _html_root_error_message
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Taken from
# https://github.com/SeleniumHQ/selenium/blob/52e9d6407248bce5de2b6a73103a50bb0e670c1f/py/test/selenium/webdriver/common/webserver.py
# with small modifications
| SimpleWebServer |
python | pytorch__pytorch | test/torch_np/test_ndarray_methods.py | {
"start": 21653,
"end": 22037
class ____(TestCase):
    """Checks ``np.amax`` on flat lists, nested lists, and ndarrays."""

    def test_basic(self):
        flat = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.amax(flat), 10.0)

        table = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]]
        # Column-wise and row-wise maxima.
        assert_equal(np.amax(table, axis=0), [8.0, 10.0, 9.0])
        assert_equal(np.amax(table, axis=1), [9.0, 10.0, 8.0])

        # The free function and the ndarray method must agree.
        as_array = np.asarray(flat)
        assert_equal(np.amax(as_array), as_array.max())
| TestAmax |
python | plotly__plotly.py | plotly/callbacks.py | {
"start": 2856,
"end": 4595
class ____:
    """Read-only view of the points involved in a plotly callback event.

    Parameters
    ----------
    point_inds : list[int] or None
        Indexes of the affected points within the trace (default: empty list).
    xs : list[float] or None
        x-coordinates of the affected points (default: empty list).
    ys : list[float] or None
        y-coordinates of the affected points (default: empty list).
    trace_name : str or None
        Name of the trace the points belong to.
    trace_index : int or None
        Index of the trace in the figure.
    """

    def __init__(self, point_inds=None, xs=None, ys=None, trace_name=None, trace_index=None):
        # ``None`` sentinels replace the previous mutable ``[]`` defaults,
        # which were evaluated once and shared by every instance.
        self._point_inds = [] if point_inds is None else point_inds
        self._xs = [] if xs is None else xs
        self._ys = [] if ys is None else ys
        self._trace_name = trace_name
        self._trace_index = trace_index

    def __repr__(self):
        return """\
Points(point_inds={point_inds},
       xs={xs},
       ys={ys},
       trace_name={trace_name},
       trace_index={trace_index})""".format(
            point_inds=_list_repr_elided(
                self.point_inds, indent=len("Points(point_inds=")
            ),
            xs=_list_repr_elided(self.xs, indent=len("       xs=")),
            ys=_list_repr_elided(self.ys, indent=len("       ys=")),
            trace_name=repr(self.trace_name),
            trace_index=repr(self.trace_index),
        )

    @property
    def point_inds(self):
        """
        List of selected indexes into the trace's points

        Returns
        -------
        list[int]
        """
        return self._point_inds

    @property
    def xs(self):
        """
        List of x-coordinates of selected points

        Returns
        -------
        list[float]
        """
        return self._xs

    @property
    def ys(self):
        """
        List of y-coordinates of selected points

        Returns
        -------
        list[float]
        """
        return self._ys

    @property
    def trace_name(self):
        """
        Name of the trace

        Returns
        -------
        str
        """
        return self._trace_name

    @property
    def trace_index(self):
        """
        Index of the trace in the figure

        Returns
        -------
        int
        """
        return self._trace_index
| Points |
python | huggingface__transformers | tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py | {
"start": 48495,
"end": 51351
class ____(unittest.TestCase):
    """Slow integration tests running TrOCR checkpoints on OCR fixtures."""

    @cached_property
    def default_processor(self):
        # Processor requires the vision extras; None lets tests skip cleanly.
        return TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") if is_vision_available() else None

    @slow
    def test_inference_handwritten(self):
        model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten").to(torch_device)

        dataset = load_dataset("hf-internal-testing/fixtures_ocr", split="train")
        image = dataset[1]["image"].convert("RGB")

        processor = self.default_processor
        pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # forward pass: a single decoder-start token produces one logits step
        decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]).to(torch_device)
        outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # Golden values recorded from a known-good run of this checkpoint.
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        ).to(torch_device)

        torch.testing.assert_close(logits[0, 0, :10], expected_slice, rtol=1e-4, atol=1e-4)

    @slow
    def test_inference_printed(self):
        model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed").to(torch_device)

        dataset = load_dataset("hf-internal-testing/fixtures_ocr", split="train")
        image = dataset[0]["image"].convert("RGB")

        processor = self.default_processor
        pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # forward pass
        decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]).to(torch_device)
        outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # Pillow >= 9 changed image resampling slightly, shifting the logits.
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [-5.6844, -5.8372, 1.1518, -6.8984, 6.8587, -2.4453, 1.2347, -1.0241, -1.9649, -3.9109],
                device=torch_device,
            )

        torch.testing.assert_close(logits[0, 0, :10], expected_slice, rtol=1e-4, atol=1e-4)
@require_vision
@require_torch
| TrOCRModelIntegrationTest |
python | pydata__xarray | asv_bench/benchmarks/dataset_io.py | {
"start": 6415,
"end": 7696
class ____(IOReadSingleNetCDF4Dask):
    """Benchmarks reading one NETCDF3_64BIT file via the scipy engine with
    dask chunking (block chunks, indexing patterns, and time chunks)."""

    def setup(self):
        # TODO: Lazily skipped in CI as it is very demanding and slow.
        # Improve times and remove errors.
        _skip_slow()

        requires_dask()

        self.make_ds()

        self.filepath = "test_single_file.nc3.nc"
        self.format = "NETCDF3_64BIT"
        self.ds.to_netcdf(self.filepath, format=self.format)

    def time_load_dataset_scipy_with_block_chunks(self):
        with dask.config.set(scheduler="multiprocessing"):
            xr.open_dataset(
                self.filepath, engine="scipy", chunks=self.block_chunks
            ).load()

    def time_load_dataset_scipy_with_block_chunks_oindexing(self):
        # Outer (orthogonal) indexing via the inherited self.oinds pattern.
        ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks)
        ds = ds.isel(**self.oinds).load()

    def time_load_dataset_scipy_with_block_chunks_vindexing(self):
        # Vectorized indexing via the inherited self.vinds pattern.
        ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks)
        ds = ds.isel(**self.vinds).load()

    def time_load_dataset_scipy_with_time_chunks(self):
        with dask.config.set(scheduler="multiprocessing"):
            xr.open_dataset(
                self.filepath, engine="scipy", chunks=self.time_chunks
            ).load()
| IOReadSingleNetCDF3Dask |
python | coleifer__peewee | tests/sqlite.py | {
"start": 83035,
"end": 85997
class ____(ModelTestCase):
    """Tests for SQLite's ``json_contains`` function via SqliteExtDatabase."""

    database = SqliteExtDatabase(':memory:', json_contains=True)
    requires = [KeyData]
    # (key, json payload) rows inserted before each test.
    test_data = (
        ('a', {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}),
        ('b', {'k2': 'v2', 'k3': 'v3', 'k4': 'v4'}),
        ('c', {'k3': 'v3', 'x1': {'y1': 'z1', 'y2': 'z2'}}),
        ('d', {'k4': 'v4', 'x1': {'y2': 'z2', 'y3': [0, 1, 2]}}),
        ('e', ['foo', 'bar', [0, 1, 2]]),
    )

    def setUp(self):
        super(TestJsonContains, self).setUp()
        with self.database.atomic():
            for key, data in self.test_data:
                KeyData.create(key=key, data=data)

    def assertContains(self, obj, expected):
        # Asserts the ordered list of row keys whose data contains ``obj``.
        contains = fn.json_contains(KeyData.data, json.dumps(obj))
        query = (KeyData
                 .select(KeyData.key)
                 .where(contains)
                 .order_by(KeyData.key)
                 .namedtuples())
        self.assertEqual([m.key for m in query], expected)

    def test_json_contains(self):
        # Simple checks for key.
        self.assertContains('k1', ['a'])
        self.assertContains('k2', ['a', 'b'])
        self.assertContains('k3', ['a', 'b', 'c'])
        self.assertContains('kx', [])
        self.assertContains('y1', [])

        # Partial dictionary.
        self.assertContains({'k1': 'v1'}, ['a'])
        self.assertContains({'k2': 'v2'}, ['a', 'b'])
        self.assertContains({'k3': 'v3'}, ['a', 'b', 'c'])
        self.assertContains({'k2': 'v2', 'k3': 'v3'}, ['a', 'b'])

        self.assertContains({'k2': 'vx'}, [])
        self.assertContains({'k2': 'v2', 'k3': 'vx'}, [])
        self.assertContains({'y1': 'z1'}, [])

        # List, interpreted as list of keys.
        self.assertContains(['k1', 'k2'], ['a'])
        self.assertContains(['k4'], ['b', 'd'])
        self.assertContains(['kx'], [])
        self.assertContains(['y1'], [])

        # List, interpreted as ordered list of items.
        self.assertContains(['foo'], ['e'])
        self.assertContains(['foo', 'bar'], ['e'])
        self.assertContains(['bar', 'foo'], [])

        # Nested dictionaries.
        self.assertContains({'x1': 'y1'}, ['c'])
        self.assertContains({'x1': ['y1']}, ['c'])
        self.assertContains({'x1': {'y1': 'z1'}}, ['c'])
        self.assertContains({'x1': {'y2': 'z2'}}, ['c', 'd'])
        self.assertContains({'x1': {'y2': 'z2'}, 'k4': 'v4'}, ['d'])

        self.assertContains({'x1': {'yx': 'z1'}}, [])
        self.assertContains({'x1': {'y1': 'z1', 'y3': 'z3'}}, [])
        self.assertContains({'x1': {'y2': 'zx'}}, [])
        self.assertContains({'x1': {'k4': 'v4'}}, [])

        # Mixing dictionaries and lists.
        self.assertContains({'x1': {'y2': 'z2', 'y3': [0]}}, ['d'])
        self.assertContains({'x1': {'y2': 'z2', 'y3': [0, 1, 2]}}, ['d'])
        self.assertContains({'x1': {'y2': 'z2', 'y3': [0, 1, 2, 4]}}, [])
        self.assertContains({'x1': {'y2': 'z2', 'y3': [0, 2]}}, [])
| TestJsonContains |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 156261,
"end": 173793
} | class ____:
def test_degenerate(self, xp):
# 0-order filter is just a passthrough
# Stopband ripple factor doesn't matter
b, a = cheby2(0, 123.456, xp.asarray(1), analog=True)
xp_assert_equal(b, xp.asarray([1.0], dtype=xp.float64))
xp_assert_equal(a, xp.asarray([1.0], dtype=xp.float64))
# 1-order filter is same for all types
b, a = cheby2(1, 10*math.log10(2), xp.asarray(1.), analog=True)
assert_array_almost_equal(b, xp.asarray([1], dtype=xp.float64))
assert_array_almost_equal(a, xp.asarray([1, 1], dtype=xp.float64))
z, p, k = cheby2(1, 50, xp.asarray(0.3), output='zpk')
xp_assert_equal(z, xp.asarray([-1], dtype=xp.complex128))
xp_assert_close(
p, xp.asarray([9.967826460175649e-01 + 0j], dtype=xp.complex128),
rtol=1e-14 if not DEFAULT_F32 else 1e-7
)
assert math.isclose(
k, 1.608676991217512e-03, rel_tol=1e-14 if not DEFAULT_F32 else 1e-6
)
def test_basic(self, xp):
for N in range(25):
wn = xp.asarray(0.01)
z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk')
assert p.shape[0] == N
assert all(xp.real(p) <= 0) # No poles in right half of S-plane
for N in range(25):
wn = xp.asarray(0.01)
z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk')
assert all(xp.abs(p) <= 1) # No poles outside unit circle
B, A = cheby2(18, 100, xp.asarray(0.5))
assert_array_almost_equal(
B, xp.asarray([
0.00167583914216, 0.01249479541868, 0.05282702120282,
0.15939804265706, 0.37690207631117, 0.73227013789108,
1.20191856962356, 1.69522872823393, 2.07598674519837,
2.21972389625291, 2.07598674519838, 1.69522872823395,
1.20191856962359, 0.73227013789110, 0.37690207631118,
0.15939804265707, 0.05282702120282, 0.01249479541868,
0.00167583914216], dtype=xp.float64),
decimal=13 if not DEFAULT_F32 else 6
)
assert_array_almost_equal(
A, xp.asarray([
1.00000000000000, -0.27631970006174, 3.19751214254060,
-0.15685969461355, 4.13926117356269, 0.60689917820044,
2.95082770636540, 0.89016501910416, 1.32135245849798,
0.51502467236824, 0.38906643866660, 0.15367372690642,
0.07255803834919, 0.02422454070134, 0.00756108751837,
0.00179848550988, 0.00033713574499, 0.00004258794833,
0.00000281030149], dtype=xp.float64),
decimal=13 if not DEFAULT_F32 else 6
)
def test_highpass(self, xp):
# high even order
z, p, k = cheby2(26, 60, xp.asarray(0.3), 'high', output='zpk')
z2 = [9.981088955489852e-01 + 6.147058341984388e-02j,
9.981088955489852e-01 - 6.147058341984388e-02j,
9.832702870387426e-01 + 1.821525257215483e-01j,
9.832702870387426e-01 - 1.821525257215483e-01j,
9.550760158089112e-01 + 2.963609353922882e-01j,
9.550760158089112e-01 - 2.963609353922882e-01j,
9.162054748821922e-01 + 4.007087817803773e-01j,
9.162054748821922e-01 - 4.007087817803773e-01j,
8.700619897368064e-01 + 4.929423232136168e-01j,
8.700619897368064e-01 - 4.929423232136168e-01j,
5.889791753434985e-01 + 8.081482110427953e-01j,
5.889791753434985e-01 - 8.081482110427953e-01j,
5.984900456570295e-01 + 8.011302423760501e-01j,
5.984900456570295e-01 - 8.011302423760501e-01j,
6.172880888914629e-01 + 7.867371958365343e-01j,
6.172880888914629e-01 - 7.867371958365343e-01j,
6.448899971038180e-01 + 7.642754030030161e-01j,
6.448899971038180e-01 - 7.642754030030161e-01j,
6.804845629637927e-01 + 7.327624168637228e-01j,
6.804845629637927e-01 - 7.327624168637228e-01j,
8.202619107108660e-01 + 5.719881098737678e-01j,
8.202619107108660e-01 - 5.719881098737678e-01j,
7.228410452536148e-01 + 6.910143437705678e-01j,
7.228410452536148e-01 - 6.910143437705678e-01j,
7.702121399578629e-01 + 6.377877856007792e-01j,
7.702121399578629e-01 - 6.377877856007792e-01j]
p2 = [7.365546198286450e-01 + 4.842085129329526e-02j,
7.365546198286450e-01 - 4.842085129329526e-02j,
7.292038510962885e-01 + 1.442201672097581e-01j,
7.292038510962885e-01 - 1.442201672097581e-01j,
7.151293788040354e-01 + 2.369925800458584e-01j,
7.151293788040354e-01 - 2.369925800458584e-01j,
6.955051820787286e-01 + 3.250341363856910e-01j,
6.955051820787286e-01 - 3.250341363856910e-01j,
6.719122956045220e-01 + 4.070475750638047e-01j,
6.719122956045220e-01 - 4.070475750638047e-01j,
6.461722130611300e-01 + 4.821965916689270e-01j,
6.461722130611300e-01 - 4.821965916689270e-01j,
5.528045062872224e-01 + 8.162920513838372e-01j,
5.528045062872224e-01 - 8.162920513838372e-01j,
5.464847782492791e-01 + 7.869899955967304e-01j,
5.464847782492791e-01 - 7.869899955967304e-01j,
5.488033111260949e-01 + 7.520442354055579e-01j,
5.488033111260949e-01 - 7.520442354055579e-01j,
6.201874719022955e-01 + 5.500894392527353e-01j,
6.201874719022955e-01 - 5.500894392527353e-01j,
5.586478152536709e-01 + 7.112676877332921e-01j,
5.586478152536709e-01 - 7.112676877332921e-01j,
5.958145844148228e-01 + 6.107074340842115e-01j,
5.958145844148228e-01 - 6.107074340842115e-01j,
5.747812938519067e-01 + 6.643001536914696e-01j,
5.747812938519067e-01 - 6.643001536914696e-01j]
z2 = xp.asarray(z2, dtype=xp.complex128)
p2 = xp.asarray(p2, dtype=xp.complex128)
k2 = 6.190427617192018e-04
k2 = 9.932997786497189e-02
xp_assert_close(
_sort_cmplx(z, xp=xp), _sort_cmplx(z2, xp=xp),
rtol=1e-13 if not DEFAULT_F32 else 1e-6
)
xp_assert_close(
_sort_cmplx(p, xp=xp), _sort_cmplx(p2, xp=xp),
rtol=1e-12 if not DEFAULT_F32 else 1e-6
)
assert math.isclose(
k, k2, rel_tol=1e-11 if not DEFAULT_F32 else 1e-6
)
# high odd order
z, p, k = cheby2(25, 80, xp.asarray(0.5), 'high', output='zpk')
z2 = [9.690690376586687e-01 + 2.467897896011971e-01j,
9.690690376586687e-01 - 2.467897896011971e-01j,
9.999999999999492e-01,
8.835111277191199e-01 + 4.684101698261429e-01j,
8.835111277191199e-01 - 4.684101698261429e-01j,
7.613142857900539e-01 + 6.483830335935022e-01j,
7.613142857900539e-01 - 6.483830335935022e-01j,
6.232625173626231e-01 + 7.820126817709752e-01j,
6.232625173626231e-01 - 7.820126817709752e-01j,
4.864456563413621e-01 + 8.737108351316745e-01j,
4.864456563413621e-01 - 8.737108351316745e-01j,
3.618368136816749e-01 + 9.322414495530347e-01j,
3.618368136816749e-01 - 9.322414495530347e-01j,
2.549486883466794e-01 + 9.669545833752675e-01j,
2.549486883466794e-01 - 9.669545833752675e-01j,
1.676175432109457e-01 + 9.858520980390212e-01j,
1.676175432109457e-01 - 9.858520980390212e-01j,
1.975218468277521e-03 + 9.999980492540941e-01j,
1.975218468277521e-03 - 9.999980492540941e-01j,
1.786959496651858e-02 + 9.998403260399917e-01j,
1.786959496651858e-02 - 9.998403260399917e-01j,
9.967933660557139e-02 + 9.950196127985684e-01j,
9.967933660557139e-02 - 9.950196127985684e-01j,
5.013970951219547e-02 + 9.987422137518890e-01j,
5.013970951219547e-02 - 9.987422137518890e-01j]
p2 = [4.218866331906864e-01,
4.120110200127552e-01 + 1.361290593621978e-01j,
4.120110200127552e-01 - 1.361290593621978e-01j,
3.835890113632530e-01 + 2.664910809911026e-01j,
3.835890113632530e-01 - 2.664910809911026e-01j,
3.399195570456499e-01 + 3.863983538639875e-01j,
3.399195570456499e-01 - 3.863983538639875e-01j,
2.855977834508353e-01 + 4.929444399540688e-01j,
2.855977834508353e-01 - 4.929444399540688e-01j,
2.255765441339322e-01 + 5.851631870205766e-01j,
2.255765441339322e-01 - 5.851631870205766e-01j,
1.644087535815792e-01 + 6.637356937277153e-01j,
1.644087535815792e-01 - 6.637356937277153e-01j,
-7.293633845273095e-02 + 9.739218252516307e-01j,
-7.293633845273095e-02 - 9.739218252516307e-01j,
1.058259206358626e-01 + 7.304739464862978e-01j,
1.058259206358626e-01 - 7.304739464862978e-01j,
-5.703971947785402e-02 + 9.291057542169088e-01j,
-5.703971947785402e-02 - 9.291057542169088e-01j,
5.263875132656864e-02 + 7.877974334424453e-01j,
5.263875132656864e-02 - 7.877974334424453e-01j,
-3.007943405982616e-02 + 8.846331716180016e-01j,
-3.007943405982616e-02 - 8.846331716180016e-01j,
6.857277464483946e-03 + 8.383275456264492e-01j,
6.857277464483946e-03 - 8.383275456264492e-01j]
z2 = xp.asarray(z2, dtype=xp.complex128)
p2 = xp.asarray(p2, dtype=xp.complex128)
k2 = 6.507068761705037e-03
xp_assert_close(
_sort_cmplx(z, xp=xp), _sort_cmplx(z2, xp=xp),
rtol=1e-13 if not DEFAULT_F32 else 1e-6
)
xp_assert_close(
_sort_cmplx(p, xp=xp), _sort_cmplx(p2, xp=xp),
rtol=1e-12 if not DEFAULT_F32 else 1e-6
)
assert math.isclose(k, k2, rel_tol=1e-11 if not DEFAULT_F32 else 1e-6)
def test_bandpass(self, xp):
z, p, k = cheby2(9, 40, xp.asarray([0.07, 0.2]), 'pass', output='zpk')
z2 = [-9.999999999999999e-01,
3.676588029658514e-01 + 9.299607543341383e-01j,
3.676588029658514e-01 - 9.299607543341383e-01j,
7.009689684982283e-01 + 7.131917730894889e-01j,
7.009689684982283e-01 - 7.131917730894889e-01j,
7.815697973765858e-01 + 6.238178033919218e-01j,
7.815697973765858e-01 - 6.238178033919218e-01j,
8.063793628819866e-01 + 5.913986160941200e-01j,
8.063793628819866e-01 - 5.913986160941200e-01j,
1.000000000000001e+00,
9.944493019920448e-01 + 1.052168511576739e-01j,
9.944493019920448e-01 - 1.052168511576739e-01j,
9.854674703367308e-01 + 1.698642543566085e-01j,
9.854674703367308e-01 - 1.698642543566085e-01j,
9.762751735919308e-01 + 2.165335665157851e-01j,
9.762751735919308e-01 - 2.165335665157851e-01j,
9.792277171575134e-01 + 2.027636011479496e-01j,
9.792277171575134e-01 - 2.027636011479496e-01j]
p2 = [8.143803410489621e-01 + 5.411056063397541e-01j,
8.143803410489621e-01 - 5.411056063397541e-01j,
7.650769827887418e-01 + 5.195412242095543e-01j,
7.650769827887418e-01 - 5.195412242095543e-01j,
6.096241204063443e-01 + 3.568440484659796e-01j,
6.096241204063443e-01 - 3.568440484659796e-01j,
6.918192770246239e-01 + 4.770463577106911e-01j,
6.918192770246239e-01 - 4.770463577106911e-01j,
6.986241085779207e-01 + 1.146512226180060e-01j,
6.986241085779207e-01 - 1.146512226180060e-01j,
8.654645923909734e-01 + 1.604208797063147e-01j,
8.654645923909734e-01 - 1.604208797063147e-01j,
9.164831670444591e-01 + 1.969181049384918e-01j,
9.164831670444591e-01 - 1.969181049384918e-01j,
9.630425777594550e-01 + 2.317513360702271e-01j,
9.630425777594550e-01 - 2.317513360702271e-01j,
9.438104703725529e-01 + 2.193509900269860e-01j,
9.438104703725529e-01 - 2.193509900269860e-01j]
z2 = xp.asarray(z2, dtype=xp.complex128)
p2 = xp.asarray(p2, dtype=xp.complex128)
k2 = 9.345352824659604e-03
xp_assert_close(
_sort_cmplx(z, xp=xp), _sort_cmplx(z2, xp=xp),
rtol=1e-13 if not DEFAULT_F32 else 1e-6
)
xp_assert_close(
_sort_cmplx(p, xp=xp), _sort_cmplx(p2, xp=xp),
rtol=1e-13 if not DEFAULT_F32 else 1e-6
)
assert math.isclose(k, k2, rel_tol=1e-11 if not DEFAULT_F32 else 1e-6)
def test_bandstop(self, xp):
z, p, k = cheby2(6, 55, xp.asarray([0.1, 0.9]), 'stop', output='zpk')
z2 = [6.230544895101009e-01 + 7.821784343111114e-01j,
6.230544895101009e-01 - 7.821784343111114e-01j,
9.086608545660115e-01 + 4.175349702471991e-01j,
9.086608545660115e-01 - 4.175349702471991e-01j,
9.478129721465802e-01 + 3.188268649763867e-01j,
9.478129721465802e-01 - 3.188268649763867e-01j,
-6.230544895100982e-01 + 7.821784343111109e-01j,
-6.230544895100982e-01 - 7.821784343111109e-01j,
-9.086608545660116e-01 + 4.175349702472088e-01j,
-9.086608545660116e-01 - 4.175349702472088e-01j,
-9.478129721465784e-01 + 3.188268649763897e-01j,
-9.478129721465784e-01 - 3.188268649763897e-01j]
p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j,
-9.464094036167638e-01 - 1.720048695084344e-01j,
-8.715844103386737e-01 + 1.370665039509297e-01j,
-8.715844103386737e-01 - 1.370665039509297e-01j,
-8.078751204586425e-01 + 5.729329866682983e-02j,
-8.078751204586425e-01 - 5.729329866682983e-02j,
9.464094036167665e-01 + 1.720048695084332e-01j,
9.464094036167665e-01 - 1.720048695084332e-01j,
8.078751204586447e-01 + 5.729329866683007e-02j,
8.078751204586447e-01 - 5.729329866683007e-02j,
8.715844103386721e-01 + 1.370665039509331e-01j,
8.715844103386721e-01 - 1.370665039509331e-01j]
z2 = xp.asarray(z2, dtype=xp.complex128)
p2 = xp.asarray(p2, dtype=xp.complex128)
k2 = 2.917823332763358e-03
xp_assert_close(
_sort_cmplx(z, xp=xp), _sort_cmplx(z2, xp=xp),
rtol=1e-13 if not DEFAULT_F32 else 1e-6
)
xp_assert_close(
_sort_cmplx(p, xp=xp), _sort_cmplx(p2, xp=xp),
rtol=1e-13 if not DEFAULT_F32 else 1e-6
)
assert math.isclose(k, k2, rel_tol=1e-11 if not DEFAULT_F32 else 1e-6)
@xfail_xp_backends("cupy", reason="inaccurate on CuPy")
def test_ba_output(self, xp):
# with transfer function conversion, without digital conversion
b, a = cheby2(5, 20, xp.asarray([2010, 2100]), 'stop', True)
b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12,
2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04,
1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02,
7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09,
1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15,
1.339913493808585e+33]
a2 = [1.000000000000000e+00, 1.849550755473371e+02,
2.113222918998538e+07, 3.125114149732283e+09,
1.785133457155609e+14, 1.979158697776348e+16,
7.535048322653831e+20, 5.567966191263037e+22,
1.589246884221346e+27, 5.871210648525566e+28,
1.339913493808590e+33]
b2 = xp.asarray(b2, dtype=xp.float64)
a2 = xp.asarray(a2, dtype=xp.float64)
xp_assert_close(b, b2, rtol=5e-14 if not DEFAULT_F32 else 1e-7)
xp_assert_close(a, a2, rtol=5e-14 if not DEFAULT_F32 else 1e-7)
def test_fs_param(self):
for fs in (900, 900.1, 1234.567):
for N in (0, 1, 2, 3, 10):
for fc in (100, 100.1, 432.12345):
for btype in ('lp', 'hp'):
ba1 = cheby2(N, 20, fc, btype, fs=fs)
ba2 = cheby2(N, 20, fc/(fs/2), btype)
for ba1_, ba2_ in zip(ba1, ba2):
xp_assert_close(ba1_, ba2_)
for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
for btype in ('bp', 'bs'):
ba1 = cheby2(N, 20, fc, btype, fs=fs)
for seq in (list, tuple, array):
fcnorm = seq([f/(fs/2) for f in fc])
ba2 = cheby2(N, 20, fcnorm, btype)
for ba1_, ba2_ in zip(ba1, ba2):
xp_assert_close(ba1_, ba2_)
@skip_xp_backends("dask.array", reason="https://github.com/dask/dask/issues/11883")
@make_xp_test_case(ellip)
| TestCheby2 |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 7686,
"end": 7968
} | class ____(Message):
message = "'...'.format(...) has unused arguments at position(s): %s"
def __init__(self, filename, loc, extra_positions):
Message.__init__(self, filename, loc)
self.message_args = (extra_positions,)
| StringDotFormatExtraPositionalArguments |
python | h5py__h5py | h5py/tests/test_h5f.py | {
"start": 438,
"end": 1704
} | class ____(TestCase):
def test_descriptor_core(self):
with File(self.mktemp(), driver='core',
backing_store=False, mode='x') as f:
assert isinstance(f.id.get_vfd_handle(), int)
def test_descriptor_sec2(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.test_descriptor_sec2')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5, driver='sec2', mode='x') as f:
descriptor = f.id.get_vfd_handle()
self.assertNotEqual(descriptor, 0)
os.fsync(descriptor)
finally:
shutil.rmtree(dn_tmp)
@ut.skipUnless(direct_vfd,
"DIRECT driver is supported on Linux if hdf5 is "
"built with the appriorate flags.")
def test_descriptor_direct(self):
dn_tmp = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestFileID.test_descriptor_direct')
fn_h5 = os.path.join(dn_tmp, 'test.h5')
try:
with File(fn_h5, driver='direct', mode='x') as f:
descriptor = f.id.get_vfd_handle()
self.assertNotEqual(descriptor, 0)
os.fsync(descriptor)
finally:
shutil.rmtree(dn_tmp)
| TestFileID |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/llms/types.py | {
"start": 20318,
"end": 20564
} | class ____(BaseModel):
"""LogProb of a token."""
token: str = Field(default_factory=str)
logprob: float = Field(default_factory=float)
bytes: List[int] = Field(default_factory=list)
# ===== Generic Model Output - Chat =====
| LogProb |
python | pypa__pip | src/pip/_internal/metadata/base.py | {
"start": 25159,
"end": 25420
} | class ____(Wheel):
def __init__(self, location: str, stream: IO[bytes]) -> None:
self.location = location
self.stream = stream
def as_zipfile(self) -> zipfile.ZipFile:
return zipfile.ZipFile(self.stream, allowZip64=True)
| MemoryWheel |
python | pandas-dev__pandas | pandas/tests/indexes/test_engines.py | {
"start": 4787,
"end": 6699
} | class ____:
engine_type = libindex.ObjectEngine
dtype = np.object_
values = list("abc")
def test_is_monotonic(self):
num = 1000
arr = np.array(["a"] * num + ["a"] * num + ["c"] * num, dtype=self.dtype)
# monotonic increasing
engine = self.engine_type(arr)
assert engine.is_monotonic_increasing is True
assert engine.is_monotonic_decreasing is False
# monotonic decreasing
engine = self.engine_type(arr[::-1])
assert engine.is_monotonic_increasing is False
assert engine.is_monotonic_decreasing is True
# neither monotonic increasing or decreasing
arr = np.array(["a"] * num + ["b"] * num + ["a"] * num, dtype=self.dtype)
engine = self.engine_type(arr[::-1])
assert engine.is_monotonic_increasing is False
assert engine.is_monotonic_decreasing is False
def test_is_unique(self):
# unique
arr = np.array(self.values, dtype=self.dtype)
engine = self.engine_type(arr)
assert engine.is_unique is True
# not unique
arr = np.array(["a", "b", "a"], dtype=self.dtype)
engine = self.engine_type(arr)
assert engine.is_unique is False
def test_get_loc(self):
# unique
arr = np.array(self.values, dtype=self.dtype)
engine = self.engine_type(arr)
assert engine.get_loc("b") == 1
# monotonic
num = 1000
arr = np.array(["a"] * num + ["b"] * num + ["c"] * num, dtype=self.dtype)
engine = self.engine_type(arr)
assert engine.get_loc("b") == slice(1000, 2000)
# not monotonic
arr = np.array(self.values * num, dtype=self.dtype)
engine = self.engine_type(arr)
expected = np.array([False, True, False] * num, dtype=bool)
result = engine.get_loc("b")
assert (result == expected).all()
| TestObjectEngine |
python | tiangolo__fastapi | tests/test_additional_properties.py | {
"start": 145,
"end": 3688
} | class ____(BaseModel):
items: Dict[str, int]
@app.post("/foo")
def foo(items: Items):
return items.items
client = TestClient(app)
def test_additional_properties_post():
response = client.post("/foo", json={"items": {"foo": 1, "bar": 2}})
assert response.status_code == 200, response.text
assert response.json() == {"foo": 1, "bar": 2}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/foo": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Foo",
"operationId": "foo_foo_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Items"}
}
},
"required": True,
},
}
}
},
"components": {
"schemas": {
"Items": {
"title": "Items",
"required": ["items"],
"type": "object",
"properties": {
"items": {
"title": "Items",
"type": "object",
"additionalProperties": {"type": "integer"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
| Items |
python | pypa__warehouse | warehouse/attestations/services.py | {
"start": 2692,
"end": 3853
} | class ____:
def __init__(self, session):
warnings.warn(
"NullIntegrityService is intended only for use in development, "
"you should not use it in production due to the lack of actual "
"attestation verification.",
InsecureIntegrityServiceWarning,
)
self.db = session
@classmethod
def create_service(cls, _context, request):
return cls(session=request.db)
def parse_attestations(
self, request: Request, _distribution: Distribution
) -> list[Attestation]:
return _extract_attestations_from_request(request)
def build_provenance(
self, request: Request, file: File, attestations: list[Attestation]
) -> DatabaseProvenance:
attestation_bundle = AttestationBundle(
publisher=request.oidc_publisher.attestation_identity,
attestations=attestations,
)
provenance = Provenance(attestation_bundles=[attestation_bundle]).model_dump(
mode="json"
)
return DatabaseProvenance(file=file, provenance=provenance)
@implementer(IIntegrityService)
| NullIntegrityService |
python | tiangolo__fastapi | fastapi/applications.py | {
"start": 1656,
"end": 180300
} | class ____(Starlette):
"""
`FastAPI` app class, the main entrypoint to use FastAPI.
Read more in the
[FastAPI docs for First Steps](https://fastapi.tiangolo.com/tutorial/first-steps/).
## Example
```python
from fastapi import FastAPI
app = FastAPI()
```
"""
def __init__(
self: AppType,
*,
debug: Annotated[
bool,
Doc(
"""
Boolean indicating if debug tracebacks should be returned on server
errors.
Read more in the
[Starlette docs for Applications](https://www.starlette.dev/applications/#instantiating-the-application).
"""
),
] = False,
routes: Annotated[
Optional[List[BaseRoute]],
Doc(
"""
**Note**: you probably shouldn't use this parameter, it is inherited
from Starlette and supported for compatibility.
---
A list of routes to serve incoming HTTP and WebSocket requests.
"""
),
deprecated(
"""
You normally wouldn't use this parameter with FastAPI, it is inherited
from Starlette and supported for compatibility.
In FastAPI, you normally would use the *path operation methods*,
like `app.get()`, `app.post()`, etc.
"""
),
] = None,
title: Annotated[
str,
Doc(
"""
The title of the API.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(title="ChimichangApp")
```
"""
),
] = "FastAPI",
summary: Annotated[
Optional[str],
Doc(
"""
A short summary of the API.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(summary="Deadpond's favorite app. Nuff said.")
```
"""
),
] = None,
description: Annotated[
str,
Doc(
'''
A description of the API. Supports Markdown (using
[CommonMark syntax](https://commonmark.org/)).
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(
description="""
ChimichangApp API helps you do awesome stuff. 🚀
## Items
You can **read items**.
## Users
You will be able to:
* **Create users** (_not implemented_).
* **Read users** (_not implemented_).
"""
)
```
'''
),
] = "",
version: Annotated[
str,
Doc(
"""
The version of the API.
**Note** This is the version of your application, not the version of
the OpenAPI specification nor the version of FastAPI being used.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(version="0.0.1")
```
"""
),
] = "0.1.0",
openapi_url: Annotated[
Optional[str],
Doc(
"""
The URL where the OpenAPI schema will be served from.
If you set it to `None`, no OpenAPI schema will be served publicly, and
the default automatic endpoints `/docs` and `/redoc` will also be
disabled.
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#openapi-url).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(openapi_url="/api/v1/openapi.json")
```
"""
),
] = "/openapi.json",
openapi_tags: Annotated[
Optional[List[Dict[str, Any]]],
Doc(
"""
A list of tags used by OpenAPI, these are the same `tags` you can set
in the *path operations*, like:
* `@app.get("/users/", tags=["users"])`
* `@app.get("/items/", tags=["items"])`
The order of the tags can be used to specify the order shown in
tools like Swagger UI, used in the automatic path `/docs`.
It's not required to specify all the tags used.
The tags that are not declared MAY be organized randomly or based
on the tools' logic. Each tag name in the list MUST be unique.
The value of each item is a `dict` containing:
* `name`: The name of the tag.
* `description`: A short description of the tag.
[CommonMark syntax](https://commonmark.org/) MAY be used for rich
text representation.
* `externalDocs`: Additional external documentation for this tag. If
provided, it would contain a `dict` with:
* `description`: A short description of the target documentation.
[CommonMark syntax](https://commonmark.org/) MAY be used for
rich text representation.
* `url`: The URL for the target documentation. Value MUST be in
the form of a URL.
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-tags).
**Example**
```python
from fastapi import FastAPI
tags_metadata = [
{
"name": "users",
"description": "Operations with users. The **login** logic is also here.",
},
{
"name": "items",
"description": "Manage items. So _fancy_ they have their own docs.",
"externalDocs": {
"description": "Items external docs",
"url": "https://fastapi.tiangolo.com/",
},
},
]
app = FastAPI(openapi_tags=tags_metadata)
```
"""
),
] = None,
servers: Annotated[
Optional[List[Dict[str, Union[str, Any]]]],
Doc(
"""
A `list` of `dict`s with connectivity information to a target server.
You would use it, for example, if your application is served from
different domains and you want to use the same Swagger UI in the
browser to interact with each of them (instead of having multiple
browser tabs open). Or if you want to leave fixed the possible URLs.
If the servers `list` is not provided, or is an empty `list`, the
default value would be a `dict` with a `url` value of `/`.
Each item in the `list` is a `dict` containing:
* `url`: A URL to the target host. This URL supports Server Variables
and MAY be relative, to indicate that the host location is relative
to the location where the OpenAPI document is being served. Variable
substitutions will be made when a variable is named in `{`brackets`}`.
* `description`: An optional string describing the host designated by
the URL. [CommonMark syntax](https://commonmark.org/) MAY be used for
rich text representation.
* `variables`: A `dict` between a variable name and its value. The value
is used for substitution in the server's URL template.
Read more in the
[FastAPI docs for Behind a Proxy](https://fastapi.tiangolo.com/advanced/behind-a-proxy/#additional-servers).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(
servers=[
{"url": "https://stag.example.com", "description": "Staging environment"},
{"url": "https://prod.example.com", "description": "Production environment"},
]
)
```
"""
),
] = None,
dependencies: Annotated[
Optional[Sequence[Depends]],
Doc(
"""
A list of global dependencies, they will be applied to each
*path operation*, including in sub-routers.
Read more about it in the
[FastAPI docs for Global Dependencies](https://fastapi.tiangolo.com/tutorial/dependencies/global-dependencies/).
**Example**
```python
from fastapi import Depends, FastAPI
from .dependencies import func_dep_1, func_dep_2
app = FastAPI(dependencies=[Depends(func_dep_1), Depends(func_dep_2)])
```
"""
),
] = None,
default_response_class: Annotated[
Type[Response],
Doc(
"""
The default response class to be used.
Read more in the
[FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#default-response-class).
**Example**
```python
from fastapi import FastAPI
from fastapi.responses import ORJSONResponse
app = FastAPI(default_response_class=ORJSONResponse)
```
"""
),
] = Default(JSONResponse),
redirect_slashes: Annotated[
bool,
Doc(
"""
Whether to detect and redirect slashes in URLs when the client doesn't
use the same format.
**Example**
```python
from fastapi import FastAPI
app = FastAPI(redirect_slashes=True) # the default
@app.get("/items/")
async def read_items():
return [{"item_id": "Foo"}]
```
With this app, if a client goes to `/items` (without a trailing slash),
they will be automatically redirected with an HTTP status code of 307
to `/items/`.
"""
),
] = True,
docs_url: Annotated[
Optional[str],
Doc(
"""
The path to the automatic interactive API documentation.
It is handled in the browser by Swagger UI.
The default URL is `/docs`. You can disable it by setting it to `None`.
If `openapi_url` is set to `None`, this will be automatically disabled.
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#docs-urls).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(docs_url="/documentation", redoc_url=None)
```
"""
),
] = "/docs",
redoc_url: Annotated[
Optional[str],
Doc(
"""
The path to the alternative automatic interactive API documentation
provided by ReDoc.
The default URL is `/redoc`. You can disable it by setting it to `None`.
If `openapi_url` is set to `None`, this will be automatically disabled.
Read more in the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#docs-urls).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(docs_url="/documentation", redoc_url="redocumentation")
```
"""
),
] = "/redoc",
swagger_ui_oauth2_redirect_url: Annotated[
Optional[str],
Doc(
"""
The OAuth2 redirect endpoint for the Swagger UI.
By default it is `/docs/oauth2-redirect`.
This is only used if you use OAuth2 (with the "Authorize" button)
with Swagger UI.
"""
),
] = "/docs/oauth2-redirect",
swagger_ui_init_oauth: Annotated[
Optional[Dict[str, Any]],
Doc(
"""
OAuth2 configuration for the Swagger UI, by default shown at `/docs`.
Read more about the available configuration options in the
[Swagger UI docs](https://swagger.io/docs/open-source-tools/swagger-ui/usage/oauth2/).
"""
),
] = None,
middleware: Annotated[
Optional[Sequence[Middleware]],
Doc(
"""
List of middleware to be added when creating the application.
In FastAPI you would normally do this with `app.add_middleware()`
instead.
Read more in the
[FastAPI docs for Middleware](https://fastapi.tiangolo.com/tutorial/middleware/).
"""
),
] = None,
exception_handlers: Annotated[
Optional[
Dict[
Union[int, Type[Exception]],
Callable[[Request, Any], Coroutine[Any, Any, Response]],
]
],
Doc(
"""
A dictionary with handlers for exceptions.
In FastAPI, you would normally use the decorator
`@app.exception_handler()`.
Read more in the
[FastAPI docs for Handling Errors](https://fastapi.tiangolo.com/tutorial/handling-errors/).
"""
),
] = None,
on_startup: Annotated[
Optional[Sequence[Callable[[], Any]]],
Doc(
"""
A list of startup event handler functions.
You should instead use the `lifespan` handlers.
Read more in the [FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/).
"""
),
] = None,
on_shutdown: Annotated[
Optional[Sequence[Callable[[], Any]]],
Doc(
"""
A list of shutdown event handler functions.
You should instead use the `lifespan` handlers.
Read more in the
[FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/).
"""
),
] = None,
lifespan: Annotated[
Optional[Lifespan[AppType]],
Doc(
"""
A `Lifespan` context manager handler. This replaces `startup` and
`shutdown` functions with a single context manager.
Read more in the
[FastAPI docs for `lifespan`](https://fastapi.tiangolo.com/advanced/events/).
"""
),
] = None,
terms_of_service: Annotated[
Optional[str],
Doc(
"""
A URL to the Terms of Service for your API.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more at the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
app = FastAPI(terms_of_service="http://example.com/terms/")
```
"""
),
] = None,
contact: Annotated[
Optional[Dict[str, Union[str, Any]]],
Doc(
"""
A dictionary with the contact information for the exposed API.
It can contain several fields.
* `name`: (`str`) The name of the contact person/organization.
* `url`: (`str`) A URL pointing to the contact information. MUST be in
the format of a URL.
* `email`: (`str`) The email address of the contact person/organization.
MUST be in the format of an email address.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more at the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
app = FastAPI(
contact={
"name": "Deadpoolio the Amazing",
"url": "http://x-force.example.com/contact/",
"email": "dp@x-force.example.com",
}
)
```
"""
),
] = None,
license_info: Annotated[
Optional[Dict[str, Union[str, Any]]],
Doc(
"""
A dictionary with the license information for the exposed API.
It can contain several fields.
* `name`: (`str`) **REQUIRED** (if a `license_info` is set). The
license name used for the API.
* `identifier`: (`str`) An [SPDX](https://spdx.dev/) license expression
for the API. The `identifier` field is mutually exclusive of the `url`
field. Available since OpenAPI 3.1.0, FastAPI 0.99.0.
* `url`: (`str`) A URL to the license used for the API. This MUST be
the format of a URL.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more at the
[FastAPI docs for Metadata and Docs URLs](https://fastapi.tiangolo.com/tutorial/metadata/#metadata-for-api).
**Example**
```python
app = FastAPI(
license_info={
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
}
)
```
"""
),
] = None,
openapi_prefix: Annotated[
str,
Doc(
"""
A URL prefix for the OpenAPI URL.
"""
),
deprecated(
"""
"openapi_prefix" has been deprecated in favor of "root_path", which
follows more closely the ASGI standard, is simpler, and more
automatic.
"""
),
] = "",
root_path: Annotated[
str,
Doc(
"""
A path prefix handled by a proxy that is not seen by the application
but is seen by external clients, which affects things like Swagger UI.
Read more about it at the
[FastAPI docs for Behind a Proxy](https://fastapi.tiangolo.com/advanced/behind-a-proxy/).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(root_path="/api/v1")
```
"""
),
] = "",
root_path_in_servers: Annotated[
bool,
Doc(
"""
To disable automatically generating the URLs in the `servers` field
in the autogenerated OpenAPI using the `root_path`.
Read more about it in the
[FastAPI docs for Behind a Proxy](https://fastapi.tiangolo.com/advanced/behind-a-proxy/#disable-automatic-server-from-root_path).
**Example**
```python
from fastapi import FastAPI
app = FastAPI(root_path_in_servers=False)
```
"""
),
] = True,
responses: Annotated[
Optional[Dict[Union[int, str], Dict[str, Any]]],
Doc(
"""
Additional responses to be shown in OpenAPI.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Additional Responses in OpenAPI](https://fastapi.tiangolo.com/advanced/additional-responses/).
And in the
[FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies).
"""
),
] = None,
callbacks: Annotated[
Optional[List[BaseRoute]],
Doc(
"""
OpenAPI callbacks that should apply to all *path operations*.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
"""
),
] = None,
webhooks: Annotated[
Optional[routing.APIRouter],
Doc(
"""
Add OpenAPI webhooks. This is similar to `callbacks` but it doesn't
depend on specific *path operations*.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
**Note**: This is available since OpenAPI 3.1.0, FastAPI 0.99.0.
Read more about it in the
[FastAPI docs for OpenAPI Webhooks](https://fastapi.tiangolo.com/advanced/openapi-webhooks/).
"""
),
] = None,
deprecated: Annotated[
Optional[bool],
Doc(
"""
Mark all *path operations* as deprecated. You probably don't need it,
but it's available.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
"""
),
] = None,
include_in_schema: Annotated[
bool,
Doc(
"""
To include (or not) all the *path operations* in the generated OpenAPI.
You probably don't need it, but it's available.
This affects the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
"""
),
] = True,
swagger_ui_parameters: Annotated[
Optional[Dict[str, Any]],
Doc(
"""
Parameters to configure Swagger UI, the autogenerated interactive API
documentation (by default at `/docs`).
Read more about it in the
[FastAPI docs about how to Configure Swagger UI](https://fastapi.tiangolo.com/how-to/configure-swagger-ui/).
"""
),
] = None,
generate_unique_id_function: Annotated[
Callable[[routing.APIRoute], str],
Doc(
"""
Customize the function used to generate unique IDs for the *path
operations* shown in the generated OpenAPI.
This is particularly useful when automatically generating clients or
SDKs for your API.
Read more about it in the
[FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
"""
),
] = Default(generate_unique_id),
separate_input_output_schemas: Annotated[
bool,
Doc(
"""
Whether to generate separate OpenAPI schemas for request body and
response body when the results would be more precise.
This is particularly useful when automatically generating clients.
For example, if you have a model like:
```python
from pydantic import BaseModel
class Item(BaseModel):
name: str
tags: list[str] = []
```
When `Item` is used for input, a request body, `tags` is not required,
the client doesn't have to provide it.
But when using `Item` for output, for a response body, `tags` is always
available because it has a default value, even if it's just an empty
list. So, the client should be able to always expect it.
In this case, there would be two different schemas, one for input and
another one for output.
"""
),
] = True,
openapi_external_docs: Annotated[
Optional[Dict[str, Any]],
Doc(
"""
This field allows you to provide additional external documentation links.
If provided, it must be a dictionary containing:
* `description`: A brief description of the external documentation.
* `url`: The URL pointing to the external documentation. The value **MUST**
be a valid URL format.
**Example**:
```python
from fastapi import FastAPI
external_docs = {
"description": "Detailed API Reference",
"url": "https://example.com/api-docs",
}
app = FastAPI(openapi_external_docs=external_docs)
```
"""
),
] = None,
**extra: Annotated[
Any,
Doc(
"""
Extra keyword arguments to be stored in the app, not used by FastAPI
anywhere.
"""
),
],
) -> None:
self.debug = debug
self.title = title
self.summary = summary
self.description = description
self.version = version
self.terms_of_service = terms_of_service
self.contact = contact
self.license_info = license_info
self.openapi_url = openapi_url
self.openapi_tags = openapi_tags
self.root_path_in_servers = root_path_in_servers
self.docs_url = docs_url
self.redoc_url = redoc_url
self.swagger_ui_oauth2_redirect_url = swagger_ui_oauth2_redirect_url
self.swagger_ui_init_oauth = swagger_ui_init_oauth
self.swagger_ui_parameters = swagger_ui_parameters
self.servers = servers or []
self.separate_input_output_schemas = separate_input_output_schemas
self.openapi_external_docs = openapi_external_docs
self.extra = extra
self.openapi_version: Annotated[
str,
Doc(
"""
The version string of OpenAPI.
FastAPI will generate OpenAPI version 3.1.0, and will output that as
the OpenAPI version. But some tools, even though they might be
compatible with OpenAPI 3.1.0, might not recognize it as a valid.
So you could override this value to trick those tools into using
the generated OpenAPI. Have in mind that this is a hack. But if you
avoid using features added in OpenAPI 3.1.0, it might work for your
use case.
This is not passed as a parameter to the `FastAPI` class to avoid
giving the false idea that FastAPI would generate a different OpenAPI
schema. It is only available as an attribute.
**Example**
```python
from fastapi import FastAPI
app = FastAPI()
app.openapi_version = "3.0.2"
```
"""
),
] = "3.1.0"
self.openapi_schema: Optional[Dict[str, Any]] = None
if self.openapi_url:
assert self.title, "A title must be provided for OpenAPI, e.g.: 'My API'"
assert self.version, "A version must be provided for OpenAPI, e.g.: '2.1.0'"
# TODO: remove when discarding the openapi_prefix parameter
if openapi_prefix:
logger.warning(
'"openapi_prefix" has been deprecated in favor of "root_path", which '
"follows more closely the ASGI standard, is simpler, and more "
"automatic. Check the docs at "
"https://fastapi.tiangolo.com/advanced/sub-applications/"
)
self.webhooks: Annotated[
routing.APIRouter,
Doc(
"""
The `app.webhooks` attribute is an `APIRouter` with the *path
operations* that will be used just for documentation of webhooks.
Read more about it in the
[FastAPI docs for OpenAPI Webhooks](https://fastapi.tiangolo.com/advanced/openapi-webhooks/).
"""
),
] = webhooks or routing.APIRouter()
self.root_path = root_path or openapi_prefix
self.state: Annotated[
State,
Doc(
"""
A state object for the application. This is the same object for the
entire application, it doesn't change from request to request.
You normally wouldn't use this in FastAPI, for most of the cases you
would instead use FastAPI dependencies.
This is simply inherited from Starlette.
Read more about it in the
[Starlette docs for Applications](https://www.starlette.dev/applications/#storing-state-on-the-app-instance).
"""
),
] = State()
self.dependency_overrides: Annotated[
Dict[Callable[..., Any], Callable[..., Any]],
Doc(
"""
A dictionary with overrides for the dependencies.
Each key is the original dependency callable, and the value is the
actual dependency that should be called.
This is for testing, to replace expensive dependencies with testing
versions.
Read more about it in the
[FastAPI docs for Testing Dependencies with Overrides](https://fastapi.tiangolo.com/advanced/testing-dependencies/).
"""
),
] = {}
self.router: routing.APIRouter = routing.APIRouter(
routes=routes,
redirect_slashes=redirect_slashes,
dependency_overrides_provider=self,
on_startup=on_startup,
on_shutdown=on_shutdown,
lifespan=lifespan,
default_response_class=default_response_class,
dependencies=dependencies,
callbacks=callbacks,
deprecated=deprecated,
include_in_schema=include_in_schema,
responses=responses,
generate_unique_id_function=generate_unique_id_function,
)
self.exception_handlers: Dict[
Any, Callable[[Request, Any], Union[Response, Awaitable[Response]]]
] = {} if exception_handlers is None else dict(exception_handlers)
self.exception_handlers.setdefault(HTTPException, http_exception_handler)
self.exception_handlers.setdefault(
RequestValidationError, request_validation_exception_handler
)
self.exception_handlers.setdefault(
WebSocketRequestValidationError,
# Starlette still has incorrect type specification for the handlers
websocket_request_validation_exception_handler, # type: ignore
)
self.user_middleware: List[Middleware] = (
[] if middleware is None else list(middleware)
)
self.middleware_stack: Union[ASGIApp, None] = None
self.setup()
def build_middleware_stack(self) -> ASGIApp:
# Duplicate/override from Starlette to add AsyncExitStackMiddleware
# inside of ExceptionMiddleware, inside of custom user middlewares
debug = self.debug
error_handler = None
exception_handlers: dict[Any, ExceptionHandler] = {}
for key, value in self.exception_handlers.items():
if key in (500, Exception):
error_handler = value
else:
exception_handlers[key] = value
middleware = (
[Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]
+ self.user_middleware
+ [
Middleware(
ExceptionMiddleware, handlers=exception_handlers, debug=debug
),
# Add FastAPI-specific AsyncExitStackMiddleware for closing files.
# Before this was also used for closing dependencies with yield but
# those now have their own AsyncExitStack, to properly support
# streaming responses while keeping compatibility with the previous
# versions (as of writing 0.117.1) that allowed doing
# except HTTPException inside a dependency with yield.
# This needs to happen after user middlewares because those create a
# new contextvars context copy by using a new AnyIO task group.
# This AsyncExitStack preserves the context for contextvars, not
# strictly necessary for closing files but it was one of the original
# intentions.
# If the AsyncExitStack lived outside of the custom middlewares and
# contextvars were set, for example in a dependency with 'yield'
# in that internal contextvars context, the values would not be
# available in the outer context of the AsyncExitStack.
# By placing the middleware and the AsyncExitStack here, inside all
# user middlewares, the same context is used.
# This is currently not needed, only for closing files, but used to be
# important when dependencies with yield were closed here.
Middleware(AsyncExitStackMiddleware),
]
)
app = self.router
for cls, args, kwargs in reversed(middleware):
app = cls(app, *args, **kwargs)
return app
def openapi(self) -> Dict[str, Any]:
"""
Generate the OpenAPI schema of the application. This is called by FastAPI
internally.
The first time it is called it stores the result in the attribute
`app.openapi_schema`, and next times it is called, it just returns that same
result. To avoid the cost of generating the schema every time.
If you need to modify the generated OpenAPI schema, you could modify it.
Read more in the
[FastAPI docs for OpenAPI](https://fastapi.tiangolo.com/how-to/extending-openapi/).
"""
if not self.openapi_schema:
self.openapi_schema = get_openapi(
title=self.title,
version=self.version,
openapi_version=self.openapi_version,
summary=self.summary,
description=self.description,
terms_of_service=self.terms_of_service,
contact=self.contact,
license_info=self.license_info,
routes=self.routes,
webhooks=self.webhooks.routes,
tags=self.openapi_tags,
servers=self.servers,
separate_input_output_schemas=self.separate_input_output_schemas,
external_docs=self.openapi_external_docs,
)
return self.openapi_schema
    def setup(self) -> None:
        """Register the documentation routes: OpenAPI JSON, Swagger UI (with its
        optional OAuth2 redirect page), and ReDoc.

        Routes are only added for URLs configured with a non-empty value; the
        two HTML doc pages additionally require `openapi_url` to be set.
        """
        if self.openapi_url:
            urls = (server_data.get("url") for server_data in self.servers)
            # Captured (and mutated) by the openapi closure below, so the
            # root_path is only inserted into self.servers once.
            server_urls = {url for url in urls if url}

            async def openapi(req: Request) -> JSONResponse:
                # Serve the (cached) OpenAPI schema. If the request arrives
                # behind a proxy with a root_path not yet listed as a server,
                # prepend it to the servers (when root_path_in_servers is on).
                root_path = req.scope.get("root_path", "").rstrip("/")
                if root_path not in server_urls:
                    if root_path and self.root_path_in_servers:
                        self.servers.insert(0, {"url": root_path})
                        server_urls.add(root_path)
                return JSONResponse(self.openapi())

            self.add_route(self.openapi_url, openapi, include_in_schema=False)
        if self.openapi_url and self.docs_url:

            async def swagger_ui_html(req: Request) -> HTMLResponse:
                # Prefix the schema and redirect URLs with any proxy root_path
                # so the docs work when mounted behind a proxy.
                root_path = req.scope.get("root_path", "").rstrip("/")
                openapi_url = root_path + self.openapi_url
                oauth2_redirect_url = self.swagger_ui_oauth2_redirect_url
                if oauth2_redirect_url:
                    oauth2_redirect_url = root_path + oauth2_redirect_url
                return get_swagger_ui_html(
                    openapi_url=openapi_url,
                    title=f"{self.title} - Swagger UI",
                    oauth2_redirect_url=oauth2_redirect_url,
                    init_oauth=self.swagger_ui_init_oauth,
                    swagger_ui_parameters=self.swagger_ui_parameters,
                )

            self.add_route(self.docs_url, swagger_ui_html, include_in_schema=False)
            if self.swagger_ui_oauth2_redirect_url:

                async def swagger_ui_redirect(req: Request) -> HTMLResponse:
                    return get_swagger_ui_oauth2_redirect_html()

                self.add_route(
                    self.swagger_ui_oauth2_redirect_url,
                    swagger_ui_redirect,
                    include_in_schema=False,
                )
        if self.openapi_url and self.redoc_url:

            async def redoc_html(req: Request) -> HTMLResponse:
                root_path = req.scope.get("root_path", "").rstrip("/")
                openapi_url = root_path + self.openapi_url
                return get_redoc_html(
                    openapi_url=openapi_url, title=f"{self.title} - ReDoc"
                )

            self.add_route(self.redoc_url, redoc_html, include_in_schema=False)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if self.root_path:
scope["root_path"] = self.root_path
await super().__call__(scope, receive, send)
    def add_api_route(
        self,
        path: str,
        endpoint: Callable[..., Any],
        *,
        response_model: Any = Default(None),
        status_code: Optional[int] = None,
        tags: Optional[List[Union[str, Enum]]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        methods: Optional[List[str]] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[IncEx] = None,
        response_model_exclude: Optional[IncEx] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Union[Type[Response], DefaultPlaceholder] = Default(
            JSONResponse
        ),
        name: Optional[str] = None,
        openapi_extra: Optional[Dict[str, Any]] = None,
        generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
            generate_unique_id
        ),
    ) -> None:
        """Add a *path operation* to the app for the given `path` and `methods`.

        This is the imperative (non-decorator) counterpart of `@app.get()`,
        `@app.post()`, etc. All arguments are forwarded unchanged to
        `self.router.add_api_route`, which implements the actual behavior.
        """
        self.router.add_api_route(
            path,
            endpoint=endpoint,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            methods=methods,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            openapi_extra=openapi_extra,
            generate_unique_id_function=generate_unique_id_function,
        )
    def api_route(
        self,
        path: str,
        *,
        response_model: Any = Default(None),
        status_code: Optional[int] = None,
        tags: Optional[List[Union[str, Enum]]] = None,
        dependencies: Optional[Sequence[Depends]] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        response_description: str = "Successful Response",
        responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
        deprecated: Optional[bool] = None,
        methods: Optional[List[str]] = None,
        operation_id: Optional[str] = None,
        response_model_include: Optional[IncEx] = None,
        response_model_exclude: Optional[IncEx] = None,
        response_model_by_alias: bool = True,
        response_model_exclude_unset: bool = False,
        response_model_exclude_defaults: bool = False,
        response_model_exclude_none: bool = False,
        include_in_schema: bool = True,
        response_class: Type[Response] = Default(JSONResponse),
        name: Optional[str] = None,
        openapi_extra: Optional[Dict[str, Any]] = None,
        generate_unique_id_function: Callable[[routing.APIRoute], str] = Default(
            generate_unique_id
        ),
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """Return a decorator that registers a *path operation* for `path`.

        Decorator form of `add_api_route`: the decorated function is registered
        on `self.router` (with all arguments forwarded unchanged) and returned
        as-is, so it can still be called directly.
        """
        def decorator(func: DecoratedCallable) -> DecoratedCallable:
            self.router.add_api_route(
                path,
                func,
                response_model=response_model,
                status_code=status_code,
                tags=tags,
                dependencies=dependencies,
                summary=summary,
                description=description,
                response_description=response_description,
                responses=responses,
                deprecated=deprecated,
                methods=methods,
                operation_id=operation_id,
                response_model_include=response_model_include,
                response_model_exclude=response_model_exclude,
                response_model_by_alias=response_model_by_alias,
                response_model_exclude_unset=response_model_exclude_unset,
                response_model_exclude_defaults=response_model_exclude_defaults,
                response_model_exclude_none=response_model_exclude_none,
                include_in_schema=include_in_schema,
                response_class=response_class,
                name=name,
                openapi_extra=openapi_extra,
                generate_unique_id_function=generate_unique_id_function,
            )
            return func

        return decorator
    def add_api_websocket_route(
        self,
        path: str,
        endpoint: Callable[..., Any],
        name: Optional[str] = None,
        *,
        dependencies: Optional[Sequence[Depends]] = None,
    ) -> None:
        """Add a WebSocket route to the app.

        Imperative counterpart of the `@app.websocket()` decorator; all
        arguments are forwarded unchanged to
        `self.router.add_api_websocket_route`.
        """
        self.router.add_api_websocket_route(
            path,
            endpoint,
            name=name,
            dependencies=dependencies,
        )
def websocket(
self,
path: Annotated[
str,
Doc(
"""
WebSocket path.
"""
),
],
name: Annotated[
Optional[str],
Doc(
"""
A name for the WebSocket. Only used internally.
"""
),
] = None,
*,
dependencies: Annotated[
Optional[Sequence[Depends]],
Doc(
"""
A list of dependencies (using `Depends()`) to be used for this
WebSocket.
Read more about it in the
[FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/).
"""
),
] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
"""
Decorate a WebSocket function.
Read more about it in the
[FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/).
**Example**
```python
from fastapi import FastAPI, WebSocket
app = FastAPI()
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
while True:
data = await websocket.receive_text()
await websocket.send_text(f"Message text was: {data}")
```
"""
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_api_websocket_route(
path,
func,
name=name,
dependencies=dependencies,
)
return func
return decorator
    def include_router(
        self,
        router: Annotated[routing.APIRouter, Doc("The `APIRouter` to include.")],
        *,
        prefix: Annotated[str, Doc("An optional path prefix for the router.")] = "",
        tags: Annotated[
            Optional[List[Union[str, Enum]]],
            Doc(
                """
                A list of tags to be applied to all the *path operations* in this
                router.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        dependencies: Annotated[
            Optional[Sequence[Depends]],
            Doc(
                """
                A list of dependencies (using `Depends()`) to be applied to all the
                *path operations* in this router.
                Read more about it in the
                [FastAPI docs for Bigger Applications - Multiple Files](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies).
                **Example**
                ```python
                from fastapi import Depends, FastAPI
                from .dependencies import get_token_header
                from .internal import admin
                app = FastAPI()
                app.include_router(
                    admin.router,
                    dependencies=[Depends(get_token_header)],
                )
                ```
                """
            ),
        ] = None,
        responses: Annotated[
            Optional[Dict[Union[int, str], Dict[str, Any]]],
            Doc(
                """
                Additional responses to be shown in OpenAPI.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Additional Responses in OpenAPI](https://fastapi.tiangolo.com/advanced/additional-responses/).
                And in the
                [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/#include-an-apirouter-with-a-custom-prefix-tags-responses-and-dependencies).
                """
            ),
        ] = None,
        deprecated: Annotated[
            Optional[bool],
            Doc(
                """
                Mark all the *path operations* in this router as deprecated.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                **Example**
                ```python
                from fastapi import FastAPI
                from .internal import old_api
                app = FastAPI()
                app.include_router(
                    old_api.router,
                    deprecated=True,
                )
                ```
                """
            ),
        ] = None,
        include_in_schema: Annotated[
            bool,
            Doc(
                """
                Include (or not) all the *path operations* in this router in the
                generated OpenAPI schema.
                This affects the generated OpenAPI (e.g. visible at `/docs`).
                **Example**
                ```python
                from fastapi import FastAPI
                from .internal import old_api
                app = FastAPI()
                app.include_router(
                    old_api.router,
                    include_in_schema=False,
                )
                ```
                """
            ),
        ] = True,
        default_response_class: Annotated[
            Type[Response],
            Doc(
                """
                Default response class to be used for the *path operations* in this
                router.
                Read more in the
                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#default-response-class).
                **Example**
                ```python
                from fastapi import FastAPI
                from fastapi.responses import ORJSONResponse
                from .internal import old_api
                app = FastAPI()
                app.include_router(
                    old_api.router,
                    default_response_class=ORJSONResponse,
                )
                ```
                """
            ),
        ] = Default(JSONResponse),
        callbacks: Annotated[
            Optional[List[BaseRoute]],
            Doc(
                """
                List of *path operations* that will be used as OpenAPI callbacks.
                This is only for OpenAPI documentation, the callbacks won't be used
                directly.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
                """
            ),
        ] = None,
        generate_unique_id_function: Annotated[
            Callable[[routing.APIRoute], str],
            Doc(
                """
                Customize the function used to generate unique IDs for the *path
                operations* shown in the generated OpenAPI.
                This is particularly useful when automatically generating clients or
                SDKs for your API.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = Default(generate_unique_id),
    ) -> None:
        """
        Include an `APIRouter` in the same app.
        Read more about it in the
        [FastAPI docs for Bigger Applications](https://fastapi.tiangolo.com/tutorial/bigger-applications/).
        ## Example
        ```python
        from fastapi import FastAPI
        from .users import users_router
        app = FastAPI()
        app.include_router(users_router)
        ```
        """
        # Pure delegation: the app's own router implements the inclusion logic
        # (prefix handling, merging tags/dependencies/responses, etc.).
        self.router.include_router(
            router,
            prefix=prefix,
            tags=tags,
            dependencies=dependencies,
            responses=responses,
            deprecated=deprecated,
            include_in_schema=include_in_schema,
            default_response_class=default_response_class,
            callbacks=callbacks,
            generate_unique_id_function=generate_unique_id_function,
        )
    def get(
        self,
        path: Annotated[
            str,
            Doc(
                """
                The URL path to be used for this *path operation*.
                For example, in `http://example.com/items`, the path is `/items`.
                """
            ),
        ],
        *,
        response_model: Annotated[
            Any,
            Doc(
                """
                The type to use for the response.
                It could be any valid Pydantic *field* type. So, it doesn't have to
                be a Pydantic model, it could be other things, like a `list`, `dict`,
                etc.
                It will be used for:
                * Documentation: the generated OpenAPI (and the UI at `/docs`) will
                    show it as the response (JSON Schema).
                * Serialization: you could return an arbitrary object and the
                    `response_model` would be used to serialize that object into the
                    corresponding JSON.
                * Filtering: the JSON sent to the client will only contain the data
                    (fields) defined in the `response_model`. If you returned an object
                    that contains an attribute `password` but the `response_model` does
                    not include that field, the JSON sent to the client would not have
                    that `password`.
                * Validation: whatever you return will be serialized with the
                    `response_model`, converting any data as necessary to generate the
                    corresponding JSON. But if the data in the object returned is not
                    valid, that would mean a violation of the contract with the client,
                    so it's an error from the API developer. So, FastAPI will raise an
                    error and return a 500 error code (Internal Server Error).
                Read more about it in the
                [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
                """
            ),
        ] = Default(None),
        status_code: Annotated[
            Optional[int],
            Doc(
                """
                The default status code to be used for the response.
                You could override the status code by returning a response directly.
                Read more about it in the
                [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
                """
            ),
        ] = None,
        tags: Annotated[
            Optional[List[Union[str, Enum]]],
            Doc(
                """
                A list of tags to be applied to the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
                """
            ),
        ] = None,
        dependencies: Annotated[
            Optional[Sequence[Depends]],
            Doc(
                """
                A list of dependencies (using `Depends()`) to be applied to the
                *path operation*.
                Read more about it in the
                [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
                """
            ),
        ] = None,
        summary: Annotated[
            Optional[str],
            Doc(
                """
                A summary for the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        description: Annotated[
            Optional[str],
            Doc(
                """
                A description for the *path operation*.
                If not provided, it will be extracted automatically from the docstring
                of the *path operation function*.
                It can contain Markdown.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        response_description: Annotated[
            str,
            Doc(
                """
                The description for the default response.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = "Successful Response",
        responses: Annotated[
            Optional[Dict[Union[int, str], Dict[str, Any]]],
            Doc(
                """
                Additional responses that could be returned by this *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        deprecated: Annotated[
            Optional[bool],
            Doc(
                """
                Mark this *path operation* as deprecated.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        operation_id: Annotated[
            Optional[str],
            Doc(
                """
                Custom operation ID to be used by this *path operation*.
                By default, it is generated automatically.
                If you provide a custom operation ID, you need to make sure it is
                unique for the whole API.
                You can customize the
                operation ID generation with the parameter
                `generate_unique_id_function` in the `FastAPI` class.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = None,
        response_model_include: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to include only certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_exclude: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to exclude certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_by_alias: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response model
                should be serialized by alias when an alias is used.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = True,
        response_model_exclude_unset: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that were not set and
                have their default values. This is different from
                `response_model_exclude_defaults` in that if the fields are set,
                they will be included in the response, even if the value is the same
                as the default.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_defaults: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that have the same value
                as the default. This is different from `response_model_exclude_unset`
                in that if the fields are set but contain the same default values,
                they will be excluded from the response.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_none: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data should
                exclude fields set to `None`.
                This is much simpler (less smart) than `response_model_exclude_unset`
                and `response_model_exclude_defaults`. You probably want to use one of
                those two instead of this one, as those allow returning `None` values
                when it makes sense.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
                """
            ),
        ] = False,
        include_in_schema: Annotated[
            bool,
            Doc(
                """
                Include this *path operation* in the generated OpenAPI schema.
                This affects the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
                """
            ),
        ] = True,
        response_class: Annotated[
            Type[Response],
            Doc(
                """
                Response class to be used for this *path operation*.
                This will not be used if you return a response directly.
                Read more about it in the
                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
                """
            ),
        ] = Default(JSONResponse),
        name: Annotated[
            Optional[str],
            Doc(
                """
                Name for this *path operation*. Only used internally.
                """
            ),
        ] = None,
        callbacks: Annotated[
            Optional[List[BaseRoute]],
            Doc(
                """
                List of *path operations* that will be used as OpenAPI callbacks.
                This is only for OpenAPI documentation, the callbacks won't be used
                directly.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
                """
            ),
        ] = None,
        openapi_extra: Annotated[
            Optional[Dict[str, Any]],
            Doc(
                """
                Extra metadata to be included in the OpenAPI schema for this *path
                operation*.
                Read more about it in the
                [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
                """
            ),
        ] = None,
        generate_unique_id_function: Annotated[
            Callable[[routing.APIRoute], str],
            Doc(
                """
                Customize the function used to generate unique IDs for the *path
                operations* shown in the generated OpenAPI.
                This is particularly useful when automatically generating clients or
                SDKs for your API.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = Default(generate_unique_id),
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """
        Add a *path operation* using an HTTP GET operation.
        ## Example
        ```python
        from fastapi import FastAPI
        app = FastAPI()
        @app.get("/items/")
        def read_items():
            return [{"name": "Empanada"}, {"name": "Arepa"}]
        ```
        """
        # Pure delegation: returns the decorator produced by the router's
        # `get`, forwarding every argument unchanged.
        return self.router.get(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
            openapi_extra=openapi_extra,
            generate_unique_id_function=generate_unique_id_function,
        )
    def put(
        self,
        path: Annotated[
            str,
            Doc(
                """
                The URL path to be used for this *path operation*.
                For example, in `http://example.com/items`, the path is `/items`.
                """
            ),
        ],
        *,
        response_model: Annotated[
            Any,
            Doc(
                """
                The type to use for the response.
                It could be any valid Pydantic *field* type. So, it doesn't have to
                be a Pydantic model, it could be other things, like a `list`, `dict`,
                etc.
                It will be used for:
                * Documentation: the generated OpenAPI (and the UI at `/docs`) will
                show it as the response (JSON Schema).
                * Serialization: you could return an arbitrary object and the
                `response_model` would be used to serialize that object into the
                corresponding JSON.
                * Filtering: the JSON sent to the client will only contain the data
                (fields) defined in the `response_model`. If you returned an object
                that contains an attribute `password` but the `response_model` does
                not include that field, the JSON sent to the client would not have
                that `password`.
                * Validation: whatever you return will be serialized with the
                `response_model`, converting any data as necessary to generate the
                corresponding JSON. But if the data in the object returned is not
                valid, that would mean a violation of the contract with the client,
                so it's an error from the API developer. So, FastAPI will raise an
                error and return a 500 error code (Internal Server Error).
                Read more about it in the
                [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
                """
            ),
        ] = Default(None),
        status_code: Annotated[
            Optional[int],
            Doc(
                """
                The default status code to be used for the response.
                You could override the status code by returning a response directly.
                Read more about it in the
                [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
                """
            ),
        ] = None,
        tags: Annotated[
            Optional[List[Union[str, Enum]]],
            Doc(
                """
                A list of tags to be applied to the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
                """
            ),
        ] = None,
        dependencies: Annotated[
            Optional[Sequence[Depends]],
            Doc(
                """
                A list of dependencies (using `Depends()`) to be applied to the
                *path operation*.
                Read more about it in the
                [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
                """
            ),
        ] = None,
        summary: Annotated[
            Optional[str],
            Doc(
                """
                A summary for the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        description: Annotated[
            Optional[str],
            Doc(
                """
                A description for the *path operation*.
                If not provided, it will be extracted automatically from the docstring
                of the *path operation function*.
                It can contain Markdown.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        response_description: Annotated[
            str,
            Doc(
                """
                The description for the default response.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = "Successful Response",
        responses: Annotated[
            Optional[Dict[Union[int, str], Dict[str, Any]]],
            Doc(
                """
                Additional responses that could be returned by this *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        deprecated: Annotated[
            Optional[bool],
            Doc(
                """
                Mark this *path operation* as deprecated.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        operation_id: Annotated[
            Optional[str],
            Doc(
                """
                Custom operation ID to be used by this *path operation*.
                By default, it is generated automatically.
                If you provide a custom operation ID, you need to make sure it is
                unique for the whole API.
                You can customize the
                operation ID generation with the parameter
                `generate_unique_id_function` in the `FastAPI` class.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = None,
        response_model_include: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to include only certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_exclude: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to exclude certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_by_alias: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response model
                should be serialized by alias when an alias is used.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = True,
        response_model_exclude_unset: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that were not set and
                have their default values. This is different from
                `response_model_exclude_defaults` in that if the fields are set,
                they will be included in the response, even if the value is the same
                as the default.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_defaults: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that have the same value
                as the default. This is different from `response_model_exclude_unset`
                in that if the fields are set but contain the same default values,
                they will be excluded from the response.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_none: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data should
                exclude fields set to `None`.
                This is much simpler (less smart) than `response_model_exclude_unset`
                and `response_model_exclude_defaults`. You probably want to use one of
                those two instead of this one, as those allow returning `None` values
                when it makes sense.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
                """
            ),
        ] = False,
        include_in_schema: Annotated[
            bool,
            Doc(
                """
                Include this *path operation* in the generated OpenAPI schema.
                This affects the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
                """
            ),
        ] = True,
        response_class: Annotated[
            Type[Response],
            Doc(
                """
                Response class to be used for this *path operation*.
                This will not be used if you return a response directly.
                Read more about it in the
                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
                """
            ),
        ] = Default(JSONResponse),
        name: Annotated[
            Optional[str],
            Doc(
                """
                Name for this *path operation*. Only used internally.
                """
            ),
        ] = None,
        callbacks: Annotated[
            Optional[List[BaseRoute]],
            Doc(
                """
                List of *path operations* that will be used as OpenAPI callbacks.
                This is only for OpenAPI documentation, the callbacks won't be used
                directly.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
                """
            ),
        ] = None,
        openapi_extra: Annotated[
            Optional[Dict[str, Any]],
            Doc(
                """
                Extra metadata to be included in the OpenAPI schema for this *path
                operation*.
                Read more about it in the
                [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
                """
            ),
        ] = None,
        generate_unique_id_function: Annotated[
            Callable[[routing.APIRoute], str],
            Doc(
                """
                Customize the function used to generate unique IDs for the *path
                operations* shown in the generated OpenAPI.
                This is particularly useful when automatically generating clients or
                SDKs for your API.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = Default(generate_unique_id),
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """
        Add a *path operation* using an HTTP PUT operation.

        Returns a decorator that registers the decorated function as the
        handler for HTTP PUT requests on *path*. Every keyword argument is
        forwarded unchanged to the application's router.

        ## Example

        ```python
        from fastapi import FastAPI
        from pydantic import BaseModel

        class Item(BaseModel):
            name: str
            description: str | None = None

        app = FastAPI()

        @app.put("/items/{item_id}")
        def replace_item(item_id: str, item: Item):
            return {"message": "Item replaced", "id": item_id}
        ```
        """
        # Delegate registration to the router so app-level and router-level
        # path operations share a single code path.
        return self.router.put(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
            openapi_extra=openapi_extra,
            generate_unique_id_function=generate_unique_id_function,
        )
    def post(
        self,
        path: Annotated[
            str,
            Doc(
                """
                The URL path to be used for this *path operation*.
                For example, in `http://example.com/items`, the path is `/items`.
                """
            ),
        ],
        *,
        response_model: Annotated[
            Any,
            Doc(
                """
                The type to use for the response.
                It could be any valid Pydantic *field* type. So, it doesn't have to
                be a Pydantic model, it could be other things, like a `list`, `dict`,
                etc.
                It will be used for:
                * Documentation: the generated OpenAPI (and the UI at `/docs`) will
                show it as the response (JSON Schema).
                * Serialization: you could return an arbitrary object and the
                `response_model` would be used to serialize that object into the
                corresponding JSON.
                * Filtering: the JSON sent to the client will only contain the data
                (fields) defined in the `response_model`. If you returned an object
                that contains an attribute `password` but the `response_model` does
                not include that field, the JSON sent to the client would not have
                that `password`.
                * Validation: whatever you return will be serialized with the
                `response_model`, converting any data as necessary to generate the
                corresponding JSON. But if the data in the object returned is not
                valid, that would mean a violation of the contract with the client,
                so it's an error from the API developer. So, FastAPI will raise an
                error and return a 500 error code (Internal Server Error).
                Read more about it in the
                [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
                """
            ),
        ] = Default(None),
        status_code: Annotated[
            Optional[int],
            Doc(
                """
                The default status code to be used for the response.
                You could override the status code by returning a response directly.
                Read more about it in the
                [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
                """
            ),
        ] = None,
        tags: Annotated[
            Optional[List[Union[str, Enum]]],
            Doc(
                """
                A list of tags to be applied to the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
                """
            ),
        ] = None,
        dependencies: Annotated[
            Optional[Sequence[Depends]],
            Doc(
                """
                A list of dependencies (using `Depends()`) to be applied to the
                *path operation*.
                Read more about it in the
                [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
                """
            ),
        ] = None,
        summary: Annotated[
            Optional[str],
            Doc(
                """
                A summary for the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        description: Annotated[
            Optional[str],
            Doc(
                """
                A description for the *path operation*.
                If not provided, it will be extracted automatically from the docstring
                of the *path operation function*.
                It can contain Markdown.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        response_description: Annotated[
            str,
            Doc(
                """
                The description for the default response.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = "Successful Response",
        responses: Annotated[
            Optional[Dict[Union[int, str], Dict[str, Any]]],
            Doc(
                """
                Additional responses that could be returned by this *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        deprecated: Annotated[
            Optional[bool],
            Doc(
                """
                Mark this *path operation* as deprecated.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        operation_id: Annotated[
            Optional[str],
            Doc(
                """
                Custom operation ID to be used by this *path operation*.
                By default, it is generated automatically.
                If you provide a custom operation ID, you need to make sure it is
                unique for the whole API.
                You can customize the
                operation ID generation with the parameter
                `generate_unique_id_function` in the `FastAPI` class.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = None,
        response_model_include: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to include only certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_exclude: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to exclude certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_by_alias: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response model
                should be serialized by alias when an alias is used.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = True,
        response_model_exclude_unset: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that were not set and
                have their default values. This is different from
                `response_model_exclude_defaults` in that if the fields are set,
                they will be included in the response, even if the value is the same
                as the default.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_defaults: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that have the same value
                as the default. This is different from `response_model_exclude_unset`
                in that if the fields are set but contain the same default values,
                they will be excluded from the response.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_none: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data should
                exclude fields set to `None`.
                This is much simpler (less smart) than `response_model_exclude_unset`
                and `response_model_exclude_defaults`. You probably want to use one of
                those two instead of this one, as those allow returning `None` values
                when it makes sense.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
                """
            ),
        ] = False,
        include_in_schema: Annotated[
            bool,
            Doc(
                """
                Include this *path operation* in the generated OpenAPI schema.
                This affects the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
                """
            ),
        ] = True,
        response_class: Annotated[
            Type[Response],
            Doc(
                """
                Response class to be used for this *path operation*.
                This will not be used if you return a response directly.
                Read more about it in the
                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
                """
            ),
        ] = Default(JSONResponse),
        name: Annotated[
            Optional[str],
            Doc(
                """
                Name for this *path operation*. Only used internally.
                """
            ),
        ] = None,
        callbacks: Annotated[
            Optional[List[BaseRoute]],
            Doc(
                """
                List of *path operations* that will be used as OpenAPI callbacks.
                This is only for OpenAPI documentation, the callbacks won't be used
                directly.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
                """
            ),
        ] = None,
        openapi_extra: Annotated[
            Optional[Dict[str, Any]],
            Doc(
                """
                Extra metadata to be included in the OpenAPI schema for this *path
                operation*.
                Read more about it in the
                [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
                """
            ),
        ] = None,
        generate_unique_id_function: Annotated[
            Callable[[routing.APIRoute], str],
            Doc(
                """
                Customize the function used to generate unique IDs for the *path
                operations* shown in the generated OpenAPI.
                This is particularly useful when automatically generating clients or
                SDKs for your API.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = Default(generate_unique_id),
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """
        Add a *path operation* using an HTTP POST operation.

        Returns a decorator that registers the decorated function as the
        handler for HTTP POST requests on *path*. Every keyword argument is
        forwarded unchanged to the application's router.

        ## Example

        ```python
        from fastapi import FastAPI
        from pydantic import BaseModel

        class Item(BaseModel):
            name: str
            description: str | None = None

        app = FastAPI()

        @app.post("/items/")
        def create_item(item: Item):
            return {"message": "Item created"}
        ```
        """
        # Delegate registration to the router so app-level and router-level
        # path operations share a single code path.
        return self.router.post(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
            openapi_extra=openapi_extra,
            generate_unique_id_function=generate_unique_id_function,
        )
    def delete(
        self,
        path: Annotated[
            str,
            Doc(
                """
                The URL path to be used for this *path operation*.
                For example, in `http://example.com/items`, the path is `/items`.
                """
            ),
        ],
        *,
        response_model: Annotated[
            Any,
            Doc(
                """
                The type to use for the response.
                It could be any valid Pydantic *field* type. So, it doesn't have to
                be a Pydantic model, it could be other things, like a `list`, `dict`,
                etc.
                It will be used for:
                * Documentation: the generated OpenAPI (and the UI at `/docs`) will
                show it as the response (JSON Schema).
                * Serialization: you could return an arbitrary object and the
                `response_model` would be used to serialize that object into the
                corresponding JSON.
                * Filtering: the JSON sent to the client will only contain the data
                (fields) defined in the `response_model`. If you returned an object
                that contains an attribute `password` but the `response_model` does
                not include that field, the JSON sent to the client would not have
                that `password`.
                * Validation: whatever you return will be serialized with the
                `response_model`, converting any data as necessary to generate the
                corresponding JSON. But if the data in the object returned is not
                valid, that would mean a violation of the contract with the client,
                so it's an error from the API developer. So, FastAPI will raise an
                error and return a 500 error code (Internal Server Error).
                Read more about it in the
                [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
                """
            ),
        ] = Default(None),
        status_code: Annotated[
            Optional[int],
            Doc(
                """
                The default status code to be used for the response.
                You could override the status code by returning a response directly.
                Read more about it in the
                [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
                """
            ),
        ] = None,
        tags: Annotated[
            Optional[List[Union[str, Enum]]],
            Doc(
                """
                A list of tags to be applied to the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
                """
            ),
        ] = None,
        dependencies: Annotated[
            Optional[Sequence[Depends]],
            Doc(
                """
                A list of dependencies (using `Depends()`) to be applied to the
                *path operation*.
                Read more about it in the
                [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
                """
            ),
        ] = None,
        summary: Annotated[
            Optional[str],
            Doc(
                """
                A summary for the *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        description: Annotated[
            Optional[str],
            Doc(
                """
                A description for the *path operation*.
                If not provided, it will be extracted automatically from the docstring
                of the *path operation function*.
                It can contain Markdown.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
                """
            ),
        ] = None,
        response_description: Annotated[
            str,
            Doc(
                """
                The description for the default response.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = "Successful Response",
        responses: Annotated[
            Optional[Dict[Union[int, str], Dict[str, Any]]],
            Doc(
                """
                Additional responses that could be returned by this *path operation*.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        deprecated: Annotated[
            Optional[bool],
            Doc(
                """
                Mark this *path operation* as deprecated.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                """
            ),
        ] = None,
        operation_id: Annotated[
            Optional[str],
            Doc(
                """
                Custom operation ID to be used by this *path operation*.
                By default, it is generated automatically.
                If you provide a custom operation ID, you need to make sure it is
                unique for the whole API.
                You can customize the
                operation ID generation with the parameter
                `generate_unique_id_function` in the `FastAPI` class.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = None,
        response_model_include: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to include only certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_exclude: Annotated[
            Optional[IncEx],
            Doc(
                """
                Configuration passed to Pydantic to exclude certain fields in the
                response data.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = None,
        response_model_by_alias: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response model
                should be serialized by alias when an alias is used.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
                """
            ),
        ] = True,
        response_model_exclude_unset: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that were not set and
                have their default values. This is different from
                `response_model_exclude_defaults` in that if the fields are set,
                they will be included in the response, even if the value is the same
                as the default.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_defaults: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data
                should have all the fields, including the ones that have the same value
                as the default. This is different from `response_model_exclude_unset`
                in that if the fields are set but contain the same default values,
                they will be excluded from the response.
                When `True`, default values are omitted from the response.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
                """
            ),
        ] = False,
        response_model_exclude_none: Annotated[
            bool,
            Doc(
                """
                Configuration passed to Pydantic to define if the response data should
                exclude fields set to `None`.
                This is much simpler (less smart) than `response_model_exclude_unset`
                and `response_model_exclude_defaults`. You probably want to use one of
                those two instead of this one, as those allow returning `None` values
                when it makes sense.
                Read more about it in the
                [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
                """
            ),
        ] = False,
        include_in_schema: Annotated[
            bool,
            Doc(
                """
                Include this *path operation* in the generated OpenAPI schema.
                This affects the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
                """
            ),
        ] = True,
        response_class: Annotated[
            Type[Response],
            Doc(
                """
                Response class to be used for this *path operation*.
                This will not be used if you return a response directly.
                Read more about it in the
                [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
                """
            ),
        ] = Default(JSONResponse),
        name: Annotated[
            Optional[str],
            Doc(
                """
                Name for this *path operation*. Only used internally.
                """
            ),
        ] = None,
        callbacks: Annotated[
            Optional[List[BaseRoute]],
            Doc(
                """
                List of *path operations* that will be used as OpenAPI callbacks.
                This is only for OpenAPI documentation, the callbacks won't be used
                directly.
                It will be added to the generated OpenAPI (e.g. visible at `/docs`).
                Read more about it in the
                [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
                """
            ),
        ] = None,
        openapi_extra: Annotated[
            Optional[Dict[str, Any]],
            Doc(
                """
                Extra metadata to be included in the OpenAPI schema for this *path
                operation*.
                Read more about it in the
                [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
                """
            ),
        ] = None,
        generate_unique_id_function: Annotated[
            Callable[[routing.APIRoute], str],
            Doc(
                """
                Customize the function used to generate unique IDs for the *path
                operations* shown in the generated OpenAPI.
                This is particularly useful when automatically generating clients or
                SDKs for your API.
                Read more about it in the
                [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
                """
            ),
        ] = Default(generate_unique_id),
    ) -> Callable[[DecoratedCallable], DecoratedCallable]:
        """
        Add a *path operation* using an HTTP DELETE operation.

        Returns a decorator that registers the decorated function as the
        handler for HTTP DELETE requests on *path*. Every keyword argument is
        forwarded unchanged to the application's router.

        ## Example

        ```python
        from fastapi import FastAPI

        app = FastAPI()

        @app.delete("/items/{item_id}")
        def delete_item(item_id: str):
            return {"message": "Item deleted"}
        ```
        """
        # Delegate registration to the router so app-level and router-level
        # path operations share a single code path.
        return self.router.delete(
            path,
            response_model=response_model,
            status_code=status_code,
            tags=tags,
            dependencies=dependencies,
            summary=summary,
            description=description,
            response_description=response_description,
            responses=responses,
            deprecated=deprecated,
            operation_id=operation_id,
            response_model_include=response_model_include,
            response_model_exclude=response_model_exclude,
            response_model_by_alias=response_model_by_alias,
            response_model_exclude_unset=response_model_exclude_unset,
            response_model_exclude_defaults=response_model_exclude_defaults,
            response_model_exclude_none=response_model_exclude_none,
            include_in_schema=include_in_schema,
            response_class=response_class,
            name=name,
            callbacks=callbacks,
            openapi_extra=openapi_extra,
            generate_unique_id_function=generate_unique_id_function,
        )
def options(
    self,
    path: Annotated[
        str,
        Doc(
            """
            The URL path to be used for this *path operation*.
            For example, in `http://example.com/items`, the path is `/items`.
            """
        ),
    ],
    *,
    response_model: Annotated[
        Any,
        Doc(
            """
            The type to use for the response.
            It could be any valid Pydantic *field* type. So, it doesn't have to
            be a Pydantic model, it could be other things, like a `list`, `dict`,
            etc.
            It will be used for:
            * Documentation: the generated OpenAPI (and the UI at `/docs`) will
            show it as the response (JSON Schema).
            * Serialization: you could return an arbitrary object and the
            `response_model` would be used to serialize that object into the
            corresponding JSON.
            * Filtering: the JSON sent to the client will only contain the data
            (fields) defined in the `response_model`. If you returned an object
            that contains an attribute `password` but the `response_model` does
            not include that field, the JSON sent to the client would not have
            that `password`.
            * Validation: whatever you return will be serialized with the
            `response_model`, converting any data as necessary to generate the
            corresponding JSON. But if the data in the object returned is not
            valid, that would mean a violation of the contract with the client,
            so it's an error from the API developer. So, FastAPI will raise an
            error and return a 500 error code (Internal Server Error).
            Read more about it in the
            [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
            """
        ),
    ] = Default(None),
    status_code: Annotated[
        Optional[int],
        Doc(
            """
            The default status code to be used for the response.
            You could override the status code by returning a response directly.
            Read more about it in the
            [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
            """
        ),
    ] = None,
    tags: Annotated[
        Optional[List[Union[str, Enum]]],
        Doc(
            """
            A list of tags to be applied to the *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
            """
        ),
    ] = None,
    dependencies: Annotated[
        Optional[Sequence[Depends]],
        Doc(
            """
            A list of dependencies (using `Depends()`) to be applied to the
            *path operation*.
            Read more about it in the
            [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
            """
        ),
    ] = None,
    summary: Annotated[
        Optional[str],
        Doc(
            """
            A summary for the *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
            """
        ),
    ] = None,
    description: Annotated[
        Optional[str],
        Doc(
            """
            A description for the *path operation*.
            If not provided, it will be extracted automatically from the docstring
            of the *path operation function*.
            It can contain Markdown.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
            """
        ),
    ] = None,
    response_description: Annotated[
        str,
        Doc(
            """
            The description for the default response.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = "Successful Response",
    responses: Annotated[
        Optional[Dict[Union[int, str], Dict[str, Any]]],
        Doc(
            """
            Additional responses that could be returned by this *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = None,
    deprecated: Annotated[
        Optional[bool],
        Doc(
            """
            Mark this *path operation* as deprecated.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = None,
    operation_id: Annotated[
        Optional[str],
        Doc(
            """
            Custom operation ID to be used by this *path operation*.
            By default, it is generated automatically.
            If you provide a custom operation ID, you need to make sure it is
            unique for the whole API.
            You can customize the
            operation ID generation with the parameter
            `generate_unique_id_function` in the `FastAPI` class.
            Read more about it in the
            [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
            """
        ),
    ] = None,
    response_model_include: Annotated[
        Optional[IncEx],
        Doc(
            """
            Configuration passed to Pydantic to include only certain fields in the
            response data.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = None,
    response_model_exclude: Annotated[
        Optional[IncEx],
        Doc(
            """
            Configuration passed to Pydantic to exclude certain fields in the
            response data.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = None,
    response_model_by_alias: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response model
            should be serialized by alias when an alias is used.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = True,
    response_model_exclude_unset: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data
            should have all the fields, including the ones that were not set and
            have their default values. This is different from
            `response_model_exclude_defaults` in that if the fields are set,
            they will be included in the response, even if the value is the same
            as the default.
            When `True`, default values are omitted from the response.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
            """
        ),
    ] = False,
    response_model_exclude_defaults: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data
            should have all the fields, including the ones that have the same value
            as the default. This is different from `response_model_exclude_unset`
            in that if the fields are set but contain the same default values,
            they will be excluded from the response.
            When `True`, default values are omitted from the response.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
            """
        ),
    ] = False,
    response_model_exclude_none: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data should
            exclude fields set to `None`.
            This is much simpler (less smart) than `response_model_exclude_unset`
            and `response_model_exclude_defaults`. You probably want to use one of
            those two instead of this one, as those allow returning `None` values
            when it makes sense.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
            """
        ),
    ] = False,
    include_in_schema: Annotated[
        bool,
        Doc(
            """
            Include this *path operation* in the generated OpenAPI schema.
            This affects the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
            """
        ),
    ] = True,
    response_class: Annotated[
        Type[Response],
        Doc(
            """
            Response class to be used for this *path operation*.
            This will not be used if you return a response directly.
            Read more about it in the
            [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
            """
        ),
    ] = Default(JSONResponse),
    name: Annotated[
        Optional[str],
        Doc(
            """
            Name for this *path operation*. Only used internally.
            """
        ),
    ] = None,
    callbacks: Annotated[
        Optional[List[BaseRoute]],
        Doc(
            """
            List of *path operations* that will be used as OpenAPI callbacks.
            This is only for OpenAPI documentation, the callbacks won't be used
            directly.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
            """
        ),
    ] = None,
    openapi_extra: Annotated[
        Optional[Dict[str, Any]],
        Doc(
            """
            Extra metadata to be included in the OpenAPI schema for this *path
            operation*.
            Read more about it in the
            [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
            """
        ),
    ] = None,
    generate_unique_id_function: Annotated[
        Callable[[routing.APIRoute], str],
        Doc(
            """
            Customize the function used to generate unique IDs for the *path
            operations* shown in the generated OpenAPI.
            This is particularly useful when automatically generating clients or
            SDKs for your API.
            Read more about it in the
            [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
            """
        ),
    ] = Default(generate_unique_id),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
    """
    Add a *path operation* using an HTTP OPTIONS operation.

    Returns a decorator that registers the decorated function as the
    handler for OPTIONS requests on the given *path*.

    ## Example
    ```python
    from fastapi import FastAPI
    app = FastAPI()
    @app.options("/items/")
    def get_item_options():
        return {"additions": ["Aji", "Guacamole"]}
    ```
    """
    # Pure delegation: every argument is forwarded unchanged to the
    # application's router, which owns the actual route registration.
    return self.router.options(
        path,
        response_model=response_model,
        status_code=status_code,
        tags=tags,
        dependencies=dependencies,
        summary=summary,
        description=description,
        response_description=response_description,
        responses=responses,
        deprecated=deprecated,
        operation_id=operation_id,
        response_model_include=response_model_include,
        response_model_exclude=response_model_exclude,
        response_model_by_alias=response_model_by_alias,
        response_model_exclude_unset=response_model_exclude_unset,
        response_model_exclude_defaults=response_model_exclude_defaults,
        response_model_exclude_none=response_model_exclude_none,
        include_in_schema=include_in_schema,
        response_class=response_class,
        name=name,
        callbacks=callbacks,
        openapi_extra=openapi_extra,
        generate_unique_id_function=generate_unique_id_function,
    )
def head(
    self,
    path: Annotated[
        str,
        Doc(
            """
            The URL path to be used for this *path operation*.
            For example, in `http://example.com/items`, the path is `/items`.
            """
        ),
    ],
    *,
    response_model: Annotated[
        Any,
        Doc(
            """
            The type to use for the response.
            It could be any valid Pydantic *field* type. So, it doesn't have to
            be a Pydantic model, it could be other things, like a `list`, `dict`,
            etc.
            It will be used for:
            * Documentation: the generated OpenAPI (and the UI at `/docs`) will
            show it as the response (JSON Schema).
            * Serialization: you could return an arbitrary object and the
            `response_model` would be used to serialize that object into the
            corresponding JSON.
            * Filtering: the JSON sent to the client will only contain the data
            (fields) defined in the `response_model`. If you returned an object
            that contains an attribute `password` but the `response_model` does
            not include that field, the JSON sent to the client would not have
            that `password`.
            * Validation: whatever you return will be serialized with the
            `response_model`, converting any data as necessary to generate the
            corresponding JSON. But if the data in the object returned is not
            valid, that would mean a violation of the contract with the client,
            so it's an error from the API developer. So, FastAPI will raise an
            error and return a 500 error code (Internal Server Error).
            Read more about it in the
            [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
            """
        ),
    ] = Default(None),
    status_code: Annotated[
        Optional[int],
        Doc(
            """
            The default status code to be used for the response.
            You could override the status code by returning a response directly.
            Read more about it in the
            [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
            """
        ),
    ] = None,
    tags: Annotated[
        Optional[List[Union[str, Enum]]],
        Doc(
            """
            A list of tags to be applied to the *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
            """
        ),
    ] = None,
    dependencies: Annotated[
        Optional[Sequence[Depends]],
        Doc(
            """
            A list of dependencies (using `Depends()`) to be applied to the
            *path operation*.
            Read more about it in the
            [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
            """
        ),
    ] = None,
    summary: Annotated[
        Optional[str],
        Doc(
            """
            A summary for the *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
            """
        ),
    ] = None,
    description: Annotated[
        Optional[str],
        Doc(
            """
            A description for the *path operation*.
            If not provided, it will be extracted automatically from the docstring
            of the *path operation function*.
            It can contain Markdown.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
            """
        ),
    ] = None,
    response_description: Annotated[
        str,
        Doc(
            """
            The description for the default response.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = "Successful Response",
    responses: Annotated[
        Optional[Dict[Union[int, str], Dict[str, Any]]],
        Doc(
            """
            Additional responses that could be returned by this *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = None,
    deprecated: Annotated[
        Optional[bool],
        Doc(
            """
            Mark this *path operation* as deprecated.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = None,
    operation_id: Annotated[
        Optional[str],
        Doc(
            """
            Custom operation ID to be used by this *path operation*.
            By default, it is generated automatically.
            If you provide a custom operation ID, you need to make sure it is
            unique for the whole API.
            You can customize the
            operation ID generation with the parameter
            `generate_unique_id_function` in the `FastAPI` class.
            Read more about it in the
            [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
            """
        ),
    ] = None,
    response_model_include: Annotated[
        Optional[IncEx],
        Doc(
            """
            Configuration passed to Pydantic to include only certain fields in the
            response data.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = None,
    response_model_exclude: Annotated[
        Optional[IncEx],
        Doc(
            """
            Configuration passed to Pydantic to exclude certain fields in the
            response data.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = None,
    response_model_by_alias: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response model
            should be serialized by alias when an alias is used.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = True,
    response_model_exclude_unset: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data
            should have all the fields, including the ones that were not set and
            have their default values. This is different from
            `response_model_exclude_defaults` in that if the fields are set,
            they will be included in the response, even if the value is the same
            as the default.
            When `True`, default values are omitted from the response.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
            """
        ),
    ] = False,
    response_model_exclude_defaults: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data
            should have all the fields, including the ones that have the same value
            as the default. This is different from `response_model_exclude_unset`
            in that if the fields are set but contain the same default values,
            they will be excluded from the response.
            When `True`, default values are omitted from the response.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
            """
        ),
    ] = False,
    response_model_exclude_none: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data should
            exclude fields set to `None`.
            This is much simpler (less smart) than `response_model_exclude_unset`
            and `response_model_exclude_defaults`. You probably want to use one of
            those two instead of this one, as those allow returning `None` values
            when it makes sense.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
            """
        ),
    ] = False,
    include_in_schema: Annotated[
        bool,
        Doc(
            """
            Include this *path operation* in the generated OpenAPI schema.
            This affects the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
            """
        ),
    ] = True,
    response_class: Annotated[
        Type[Response],
        Doc(
            """
            Response class to be used for this *path operation*.
            This will not be used if you return a response directly.
            Read more about it in the
            [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
            """
        ),
    ] = Default(JSONResponse),
    name: Annotated[
        Optional[str],
        Doc(
            """
            Name for this *path operation*. Only used internally.
            """
        ),
    ] = None,
    callbacks: Annotated[
        Optional[List[BaseRoute]],
        Doc(
            """
            List of *path operations* that will be used as OpenAPI callbacks.
            This is only for OpenAPI documentation, the callbacks won't be used
            directly.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
            """
        ),
    ] = None,
    openapi_extra: Annotated[
        Optional[Dict[str, Any]],
        Doc(
            """
            Extra metadata to be included in the OpenAPI schema for this *path
            operation*.
            Read more about it in the
            [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
            """
        ),
    ] = None,
    generate_unique_id_function: Annotated[
        Callable[[routing.APIRoute], str],
        Doc(
            """
            Customize the function used to generate unique IDs for the *path
            operations* shown in the generated OpenAPI.
            This is particularly useful when automatically generating clients or
            SDKs for your API.
            Read more about it in the
            [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
            """
        ),
    ] = Default(generate_unique_id),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
    """
    Add a *path operation* using an HTTP HEAD operation.

    Returns a decorator that registers the decorated function as the
    handler for HEAD requests on the given *path*.

    ## Example
    ```python
    from fastapi import FastAPI, Response
    app = FastAPI()
    @app.head("/items/", status_code=204)
    def get_items_headers(response: Response):
        response.headers["X-Cat-Dog"] = "Alone in the world"
    ```
    """
    # Pure delegation: every argument is forwarded unchanged to the
    # application's router, which owns the actual route registration.
    return self.router.head(
        path,
        response_model=response_model,
        status_code=status_code,
        tags=tags,
        dependencies=dependencies,
        summary=summary,
        description=description,
        response_description=response_description,
        responses=responses,
        deprecated=deprecated,
        operation_id=operation_id,
        response_model_include=response_model_include,
        response_model_exclude=response_model_exclude,
        response_model_by_alias=response_model_by_alias,
        response_model_exclude_unset=response_model_exclude_unset,
        response_model_exclude_defaults=response_model_exclude_defaults,
        response_model_exclude_none=response_model_exclude_none,
        include_in_schema=include_in_schema,
        response_class=response_class,
        name=name,
        callbacks=callbacks,
        openapi_extra=openapi_extra,
        generate_unique_id_function=generate_unique_id_function,
    )
def patch(
    self,
    path: Annotated[
        str,
        Doc(
            """
            The URL path to be used for this *path operation*.
            For example, in `http://example.com/items`, the path is `/items`.
            """
        ),
    ],
    *,
    response_model: Annotated[
        Any,
        Doc(
            """
            The type to use for the response.
            It could be any valid Pydantic *field* type. So, it doesn't have to
            be a Pydantic model, it could be other things, like a `list`, `dict`,
            etc.
            It will be used for:
            * Documentation: the generated OpenAPI (and the UI at `/docs`) will
            show it as the response (JSON Schema).
            * Serialization: you could return an arbitrary object and the
            `response_model` would be used to serialize that object into the
            corresponding JSON.
            * Filtering: the JSON sent to the client will only contain the data
            (fields) defined in the `response_model`. If you returned an object
            that contains an attribute `password` but the `response_model` does
            not include that field, the JSON sent to the client would not have
            that `password`.
            * Validation: whatever you return will be serialized with the
            `response_model`, converting any data as necessary to generate the
            corresponding JSON. But if the data in the object returned is not
            valid, that would mean a violation of the contract with the client,
            so it's an error from the API developer. So, FastAPI will raise an
            error and return a 500 error code (Internal Server Error).
            Read more about it in the
            [FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
            """
        ),
    ] = Default(None),
    status_code: Annotated[
        Optional[int],
        Doc(
            """
            The default status code to be used for the response.
            You could override the status code by returning a response directly.
            Read more about it in the
            [FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
            """
        ),
    ] = None,
    tags: Annotated[
        Optional[List[Union[str, Enum]]],
        Doc(
            """
            A list of tags to be applied to the *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
            """
        ),
    ] = None,
    dependencies: Annotated[
        Optional[Sequence[Depends]],
        Doc(
            """
            A list of dependencies (using `Depends()`) to be applied to the
            *path operation*.
            Read more about it in the
            [FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
            """
        ),
    ] = None,
    summary: Annotated[
        Optional[str],
        Doc(
            """
            A summary for the *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
            """
        ),
    ] = None,
    description: Annotated[
        Optional[str],
        Doc(
            """
            A description for the *path operation*.
            If not provided, it will be extracted automatically from the docstring
            of the *path operation function*.
            It can contain Markdown.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
            """
        ),
    ] = None,
    response_description: Annotated[
        str,
        Doc(
            """
            The description for the default response.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = "Successful Response",
    responses: Annotated[
        Optional[Dict[Union[int, str], Dict[str, Any]]],
        Doc(
            """
            Additional responses that could be returned by this *path operation*.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = None,
    deprecated: Annotated[
        Optional[bool],
        Doc(
            """
            Mark this *path operation* as deprecated.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            """
        ),
    ] = None,
    operation_id: Annotated[
        Optional[str],
        Doc(
            """
            Custom operation ID to be used by this *path operation*.
            By default, it is generated automatically.
            If you provide a custom operation ID, you need to make sure it is
            unique for the whole API.
            You can customize the
            operation ID generation with the parameter
            `generate_unique_id_function` in the `FastAPI` class.
            Read more about it in the
            [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
            """
        ),
    ] = None,
    response_model_include: Annotated[
        Optional[IncEx],
        Doc(
            """
            Configuration passed to Pydantic to include only certain fields in the
            response data.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = None,
    response_model_exclude: Annotated[
        Optional[IncEx],
        Doc(
            """
            Configuration passed to Pydantic to exclude certain fields in the
            response data.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = None,
    response_model_by_alias: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response model
            should be serialized by alias when an alias is used.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
            """
        ),
    ] = True,
    response_model_exclude_unset: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data
            should have all the fields, including the ones that were not set and
            have their default values. This is different from
            `response_model_exclude_defaults` in that if the fields are set,
            they will be included in the response, even if the value is the same
            as the default.
            When `True`, default values are omitted from the response.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
            """
        ),
    ] = False,
    response_model_exclude_defaults: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data
            should have all the fields, including the ones that have the same value
            as the default. This is different from `response_model_exclude_unset`
            in that if the fields are set but contain the same default values,
            they will be excluded from the response.
            When `True`, default values are omitted from the response.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
            """
        ),
    ] = False,
    response_model_exclude_none: Annotated[
        bool,
        Doc(
            """
            Configuration passed to Pydantic to define if the response data should
            exclude fields set to `None`.
            This is much simpler (less smart) than `response_model_exclude_unset`
            and `response_model_exclude_defaults`. You probably want to use one of
            those two instead of this one, as those allow returning `None` values
            when it makes sense.
            Read more about it in the
            [FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
            """
        ),
    ] = False,
    include_in_schema: Annotated[
        bool,
        Doc(
            """
            Include this *path operation* in the generated OpenAPI schema.
            This affects the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
            """
        ),
    ] = True,
    response_class: Annotated[
        Type[Response],
        Doc(
            """
            Response class to be used for this *path operation*.
            This will not be used if you return a response directly.
            Read more about it in the
            [FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
            """
        ),
    ] = Default(JSONResponse),
    name: Annotated[
        Optional[str],
        Doc(
            """
            Name for this *path operation*. Only used internally.
            """
        ),
    ] = None,
    callbacks: Annotated[
        Optional[List[BaseRoute]],
        Doc(
            """
            List of *path operations* that will be used as OpenAPI callbacks.
            This is only for OpenAPI documentation, the callbacks won't be used
            directly.
            It will be added to the generated OpenAPI (e.g. visible at `/docs`).
            Read more about it in the
            [FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
            """
        ),
    ] = None,
    openapi_extra: Annotated[
        Optional[Dict[str, Any]],
        Doc(
            """
            Extra metadata to be included in the OpenAPI schema for this *path
            operation*.
            Read more about it in the
            [FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
            """
        ),
    ] = None,
    generate_unique_id_function: Annotated[
        Callable[[routing.APIRoute], str],
        Doc(
            """
            Customize the function used to generate unique IDs for the *path
            operations* shown in the generated OpenAPI.
            This is particularly useful when automatically generating clients or
            SDKs for your API.
            Read more about it in the
            [FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
            """
        ),
    ] = Default(generate_unique_id),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
    """
    Add a *path operation* using an HTTP PATCH operation.

    Returns a decorator that registers the decorated function as the
    handler for PATCH requests on the given *path*.

    ## Example
    ```python
    from fastapi import FastAPI
    from pydantic import BaseModel
    class Item(BaseModel):
        name: str
        description: str | None = None
    app = FastAPI()
    @app.patch("/items/")
    def update_item(item: Item):
        return {"message": "Item updated in place"}
    ```
    """
    # Pure delegation: every argument is forwarded unchanged to the
    # application's router, which owns the actual route registration.
    return self.router.patch(
        path,
        response_model=response_model,
        status_code=status_code,
        tags=tags,
        dependencies=dependencies,
        summary=summary,
        description=description,
        response_description=response_description,
        responses=responses,
        deprecated=deprecated,
        operation_id=operation_id,
        response_model_include=response_model_include,
        response_model_exclude=response_model_exclude,
        response_model_by_alias=response_model_by_alias,
        response_model_exclude_unset=response_model_exclude_unset,
        response_model_exclude_defaults=response_model_exclude_defaults,
        response_model_exclude_none=response_model_exclude_none,
        include_in_schema=include_in_schema,
        response_class=response_class,
        name=name,
        callbacks=callbacks,
        openapi_extra=openapi_extra,
        generate_unique_id_function=generate_unique_id_function,
    )
def trace(
self,
path: Annotated[
str,
Doc(
"""
The URL path to be used for this *path operation*.
For example, in `http://example.com/items`, the path is `/items`.
"""
),
],
*,
response_model: Annotated[
Any,
Doc(
"""
The type to use for the response.
It could be any valid Pydantic *field* type. So, it doesn't have to
be a Pydantic model, it could be other things, like a `list`, `dict`,
etc.
It will be used for:
* Documentation: the generated OpenAPI (and the UI at `/docs`) will
show it as the response (JSON Schema).
* Serialization: you could return an arbitrary object and the
`response_model` would be used to serialize that object into the
corresponding JSON.
* Filtering: the JSON sent to the client will only contain the data
(fields) defined in the `response_model`. If you returned an object
that contains an attribute `password` but the `response_model` does
not include that field, the JSON sent to the client would not have
that `password`.
* Validation: whatever you return will be serialized with the
`response_model`, converting any data as necessary to generate the
corresponding JSON. But if the data in the object returned is not
valid, that would mean a violation of the contract with the client,
so it's an error from the API developer. So, FastAPI will raise an
error and return a 500 error code (Internal Server Error).
Read more about it in the
[FastAPI docs for Response Model](https://fastapi.tiangolo.com/tutorial/response-model/).
"""
),
] = Default(None),
status_code: Annotated[
Optional[int],
Doc(
"""
The default status code to be used for the response.
You could override the status code by returning a response directly.
Read more about it in the
[FastAPI docs for Response Status Code](https://fastapi.tiangolo.com/tutorial/response-status-code/).
"""
),
] = None,
tags: Annotated[
Optional[List[Union[str, Enum]]],
Doc(
"""
A list of tags to be applied to the *path operation*.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/#tags).
"""
),
] = None,
dependencies: Annotated[
Optional[Sequence[Depends]],
Doc(
"""
A list of dependencies (using `Depends()`) to be applied to the
*path operation*.
Read more about it in the
[FastAPI docs for Dependencies in path operation decorators](https://fastapi.tiangolo.com/tutorial/dependencies/dependencies-in-path-operation-decorators/).
"""
),
] = None,
summary: Annotated[
Optional[str],
Doc(
"""
A summary for the *path operation*.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
"""
),
] = None,
description: Annotated[
Optional[str],
Doc(
"""
A description for the *path operation*.
If not provided, it will be extracted automatically from the docstring
of the *path operation function*.
It can contain Markdown.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Path Operation Configuration](https://fastapi.tiangolo.com/tutorial/path-operation-configuration/).
"""
),
] = None,
response_description: Annotated[
str,
Doc(
"""
The description for the default response.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = "Successful Response",
responses: Annotated[
Optional[Dict[Union[int, str], Dict[str, Any]]],
Doc(
"""
Additional responses that could be returned by this *path operation*.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
deprecated: Annotated[
Optional[bool],
Doc(
"""
Mark this *path operation* as deprecated.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
operation_id: Annotated[
Optional[str],
Doc(
"""
Custom operation ID to be used by this *path operation*.
By default, it is generated automatically.
If you provide a custom operation ID, you need to make sure it is
unique for the whole API.
You can customize the
operation ID generation with the parameter
`generate_unique_id_function` in the `FastAPI` class.
Read more about it in the
[FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
"""
),
] = None,
response_model_include: Annotated[
Optional[IncEx],
Doc(
"""
Configuration passed to Pydantic to include only certain fields in the
response data.
Read more about it in the
[FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
"""
),
] = None,
response_model_exclude: Annotated[
Optional[IncEx],
Doc(
"""
Configuration passed to Pydantic to exclude certain fields in the
response data.
Read more about it in the
[FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
"""
),
] = None,
response_model_by_alias: Annotated[
bool,
Doc(
"""
Configuration passed to Pydantic to define if the response model
should be serialized by alias when an alias is used.
Read more about it in the
[FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_include-and-response_model_exclude).
"""
),
] = True,
response_model_exclude_unset: Annotated[
bool,
Doc(
"""
Configuration passed to Pydantic to define if the response data
should have all the fields, including the ones that were not set and
have their default values. This is different from
`response_model_exclude_defaults` in that if the fields are set,
they will be included in the response, even if the value is the same
as the default.
When `True`, default values are omitted from the response.
Read more about it in the
[FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
"""
),
] = False,
response_model_exclude_defaults: Annotated[
bool,
Doc(
"""
Configuration passed to Pydantic to define if the response data
should have all the fields, including the ones that have the same value
as the default. This is different from `response_model_exclude_unset`
in that if the fields are set but contain the same default values,
they will be excluded from the response.
When `True`, default values are omitted from the response.
Read more about it in the
[FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter).
"""
),
] = False,
response_model_exclude_none: Annotated[
bool,
Doc(
"""
Configuration passed to Pydantic to define if the response data should
exclude fields set to `None`.
This is much simpler (less smart) than `response_model_exclude_unset`
and `response_model_exclude_defaults`. You probably want to use one of
those two instead of this one, as those allow returning `None` values
when it makes sense.
Read more about it in the
[FastAPI docs for Response Model - Return Type](https://fastapi.tiangolo.com/tutorial/response-model/#response_model_exclude_none).
"""
),
] = False,
include_in_schema: Annotated[
bool,
Doc(
"""
Include this *path operation* in the generated OpenAPI schema.
This affects the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for Query Parameters and String Validations](https://fastapi.tiangolo.com/tutorial/query-params-str-validations/#exclude-parameters-from-openapi).
"""
),
] = True,
response_class: Annotated[
Type[Response],
Doc(
"""
Response class to be used for this *path operation*.
This will not be used if you return a response directly.
Read more about it in the
[FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/#redirectresponse).
"""
),
] = Default(JSONResponse),
name: Annotated[
Optional[str],
Doc(
"""
Name for this *path operation*. Only used internally.
"""
),
] = None,
callbacks: Annotated[
Optional[List[BaseRoute]],
Doc(
"""
List of *path operations* that will be used as OpenAPI callbacks.
This is only for OpenAPI documentation, the callbacks won't be used
directly.
It will be added to the generated OpenAPI (e.g. visible at `/docs`).
Read more about it in the
[FastAPI docs for OpenAPI Callbacks](https://fastapi.tiangolo.com/advanced/openapi-callbacks/).
"""
),
] = None,
openapi_extra: Annotated[
Optional[Dict[str, Any]],
Doc(
"""
Extra metadata to be included in the OpenAPI schema for this *path
operation*.
Read more about it in the
[FastAPI docs for Path Operation Advanced Configuration](https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#custom-openapi-path-operation-schema).
"""
),
] = None,
generate_unique_id_function: Annotated[
Callable[[routing.APIRoute], str],
Doc(
"""
Customize the function used to generate unique IDs for the *path
operations* shown in the generated OpenAPI.
This is particularly useful when automatically generating clients or
SDKs for your API.
Read more about it in the
[FastAPI docs about how to Generate Clients](https://fastapi.tiangolo.com/advanced/generate-clients/#custom-generate-unique-id-function).
"""
),
] = Default(generate_unique_id),
) -> Callable[[DecoratedCallable], DecoratedCallable]:
"""
Add a *path operation* using an HTTP TRACE operation.
## Example
```python
from fastapi import FastAPI
app = FastAPI()
@app.trace("/items/{item_id}")
def trace_item(item_id: str):
return None
```
"""
return self.router.trace(
path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
openapi_extra=openapi_extra,
generate_unique_id_function=generate_unique_id_function,
)
def websocket_route(
self, path: str, name: Union[str, None] = None
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.router.add_websocket_route(path, func, name=name)
return func
return decorator
@deprecated(
"""
on_event is deprecated, use lifespan event handlers instead.
Read more about it in the
[FastAPI docs for Lifespan Events](https://fastapi.tiangolo.com/advanced/events/).
"""
)
def on_event(
self,
event_type: Annotated[
str,
Doc(
"""
The type of event. `startup` or `shutdown`.
"""
),
],
) -> Callable[[DecoratedCallable], DecoratedCallable]:
"""
Add an event handler for the application.
`on_event` is deprecated, use `lifespan` event handlers instead.
Read more about it in the
[FastAPI docs for Lifespan Events](https://fastapi.tiangolo.com/advanced/events/#alternative-events-deprecated).
"""
return self.router.on_event(event_type)
def middleware(
self,
middleware_type: Annotated[
str,
Doc(
"""
The type of middleware. Currently only supports `http`.
"""
),
],
) -> Callable[[DecoratedCallable], DecoratedCallable]:
"""
Add a middleware to the application.
Read more about it in the
[FastAPI docs for Middleware](https://fastapi.tiangolo.com/tutorial/middleware/).
## Example
```python
import time
from typing import Awaitable, Callable
from fastapi import FastAPI, Request, Response
app = FastAPI()
@app.middleware("http")
async def add_process_time_header(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
```
"""
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_middleware(BaseHTTPMiddleware, dispatch=func)
return func
return decorator
def exception_handler(
self,
exc_class_or_status_code: Annotated[
Union[int, Type[Exception]],
Doc(
"""
The Exception class this would handle, or a status code.
"""
),
],
) -> Callable[[DecoratedCallable], DecoratedCallable]:
"""
Add an exception handler to the app.
Read more about it in the
[FastAPI docs for Handling Errors](https://fastapi.tiangolo.com/tutorial/handling-errors/).
## Example
```python
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
class UnicornException(Exception):
def __init__(self, name: str):
self.name = name
app = FastAPI()
@app.exception_handler(UnicornException)
async def unicorn_exception_handler(request: Request, exc: UnicornException):
return JSONResponse(
status_code=418,
content={"message": f"Oops! {exc.name} did something. There goes a rainbow..."},
)
```
"""
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_exception_handler(exc_class_or_status_code, func)
return func
return decorator
| FastAPI |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_uptime_ips.py | {
"start": 145,
"end": 529
} | class ____(APITestCase):
endpoint = "sentry-api-0-uptime-ips"
@override_options({"uptime.uptime-ips-api-response": ["10.0.0.1", "10.0.0.2"]})
def test_simple(self) -> None:
response = self.get_success_response()
# Validate that we get back IP addresses
for ip in response.content.decode().split("\n"):
ip_address(ip)
| UptimeIpsEndpointTest |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 8856,
"end": 8922
} | class ____(PredictableInstr, Terminator):
pass
| ConditionalBranch |
python | readthedocs__readthedocs.org | readthedocs/oauth/querysets.py | {
"start": 3149,
"end": 3213
} | class ____(RelatedUserQuerySet):
pass
| RemoteOrganizationQuerySet |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 24794,
"end": 25278
} | class ____(models.Model):
"""
Non-historic table foreign key to historic table.
In this case it should simply behave like ForeignKey because
the origin model (this one) cannot be historic, so foreign key
lookups are always "current".
"""
name = models.CharField(max_length=15, unique=True)
organization = HistoricForeignKey(
TestOrganizationWithHistory, on_delete=CASCADE, related_name="participants"
)
| TestParticipantToHistoricOrganization |
python | huggingface__transformers | tests/models/janus/test_modeling_janus.py | {
"start": 1611,
"end": 6548
} | class ____:
def __init__(
self,
parent,
image_token_index=0,
seq_length=25,
initializer_range=0.02,
text_config={
"model_type": "llama",
"seq_length": 7,
"is_training": True,
"use_input_mask": True,
"use_token_type_ids": False,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 16,
"type_sequence_label_size": 2,
"initializer_range": 0.02,
"num_labels": 3,
"num_choices": 4,
"pad_token_id": 1,
},
is_training=True,
vision_config={
"use_labels": True,
"image_size": 20,
"patch_size": 5,
"num_image_tokens": 16,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"projection_dim": 32,
"num_key_value_heads": 1,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"mlp_ratio": 2,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
"vision_feature_select_strategy": "default",
"vision_feature_layer": -1,
},
use_cache=False,
vq_num_embeds=12,
vq_embed_dim=12,
vq_channel_multiplier=[1, 1],
):
self.parent = parent
self.initializer_range = initializer_range
# `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify
self.image_token_index = image_token_index
self.text_config = text_config
self.vision_config = vision_config
self.seq_length = seq_length
self.pad_token_id = text_config["pad_token_id"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 3
self.num_channels = vision_config["num_channels"]
self.image_size = vision_config["image_size"]
self.num_image_tokens = vision_config["num_image_tokens"]
self.use_cache = use_cache
# vq model params
self.vq_num_embeds = vq_num_embeds
self.vq_embed_dim = vq_embed_dim
self.vq_channel_multiplier = vq_channel_multiplier
def get_vq_config(self):
return {
"embed_dim": self.vq_embed_dim,
"num_embeddings": self.vq_num_embeds,
"latent_channels": self.vq_embed_dim,
"in_channels": 3,
"base_channels": 32, # we have a GroupNorm of 32 groups, so can't do less
"channel_multiplier": self.vq_channel_multiplier,
"initializer_range": self.initializer_range,
"projection_dim": 10,
"image_token_embed_dim": 32, # Same as text model hidden size
}
def get_config(self):
return JanusConfig(
text_config=self.text_config,
vision_config=self.vision_config,
vq_config=self.get_vq_config(),
image_token_id=self.image_token_index,
)
def prepare_config_and_inputs(self):
config = self.get_config()
pixel_values = floats_tensor(
[
self.batch_size,
3,
self.image_size,
self.image_size,
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
# set the 16 first tokens to be image, and ensure that no other tokens are image tokens
# do not change this unless you modified image size or patch size
input_ids[input_ids == self.image_token_index] = self.pad_token_id
input_ids[:, : self.num_image_tokens] = self.image_token_index
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": input_ids,
"generation_mode": "text", # Required to perform text generation instead of image generation.
}
return config, inputs_dict
@require_torch
| JanusVisionText2TextModelTester |
python | pytorch__pytorch | torch/_inductor/fx_passes/overlap_preserving_bucketer.py | {
"start": 773,
"end": 1725
} | class ____:
name1: str
name2: str
reason: str
args: tuple[Any, ...]
def __init__(self, node1: fx.Node, node2: fx.Node) -> None:
self.name1 = node1.name
self.name2 = node2.name
self.reason = ""
self.args = ()
def __call__(self, reason: str, *args: Any) -> None:
if bucket_log.isEnabledFor(logging.DEBUG):
bucket_log.debug(
"cannot bucket %s with %s: " + reason, # noqa: G003
self.name1,
self.name2,
*args,
)
def is_collective_or_wait(n: fx.Node) -> bool:
"""Check if node is a collective start or wait."""
if _schedulable_wait_node(n):
return True
# Collective starts have exactly one use: the wait_tensor
if len(n.users) == 1:
user = next(iter(n.users.keys()))
if _schedulable_wait_node(user):
return True
return False
@dataclass
| WhyNoBucket |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 7784,
"end": 7856
} | class ____(A18):
@staticmethod
def m(arg):
sink_b(arg)
| B18 |
python | RaRe-Technologies__gensim | gensim/test/test_probability_estimation.py | {
"start": 3213,
"end": 3567
} | class ____(BaseTestCases.ProbabilityEstimationBase):
def setup_dictionary(self):
self.dictionary = Dictionary(self.texts)
self.dictionary.id2token = {v: k for k, v in self.dictionary.token2id.items()}
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
| TestProbabilityEstimationWithNormalDictionary |
python | django__django | django/templatetags/cache.py | {
"start": 235,
"end": 3550
} | class ____(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on, cache_name):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
self.cache_name = cache_name
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError(
'"cache" tag got an unknown variable: %r' % self.expire_time_var.var
)
if expire_time is not None:
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError(
'"cache" tag got a non-integer timeout value: %r' % expire_time
)
if self.cache_name:
try:
cache_name = self.cache_name.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError(
'"cache" tag got an unknown variable: %r' % self.cache_name.var
)
try:
fragment_cache = caches[cache_name]
except InvalidCacheBackendError:
raise TemplateSyntaxError(
"Invalid cache name specified for cache tag: %r" % cache_name
)
else:
try:
fragment_cache = caches["template_fragments"]
except InvalidCacheBackendError:
fragment_cache = caches["default"]
vary_on = [var.resolve(context) for var in self.vary_on]
cache_key = make_template_fragment_key(self.fragment_name, vary_on)
value = fragment_cache.get(cache_key)
if value is None:
value = self.nodelist.render(context)
fragment_cache.set(cache_key, value, expire_time)
return value
@register.tag("cache")
def do_cache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cache %}
{% cache [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcache %}
This tag also supports varying by a list of arguments::
{% load cache %}
{% cache [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcache %}
Optionally the cache to use may be specified thus::
{% cache .... using="cachename" %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(("endcache",))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
if len(tokens) > 3 and tokens[-1].startswith("using="):
cache_name = parser.compile_filter(tokens[-1].removeprefix("using="))
tokens = tokens[:-1]
else:
cache_name = None
return CacheNode(
nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
cache_name,
)
| CacheNode |
python | huggingface__transformers | examples/modular-transformers/modular_multimodal2.py | {
"start": 1385,
"end": 1567
} | class ____(CLIPVisionTransformer):
def __init__(self, config):
super().__init__(config)
self.encoder = Multimodal2VisionEncoder(config)
| Multimodal2VisionTransformer |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 7995,
"end": 8185
} | class ____(_TestIFFTBase):
def setup_method(self):
self.cdt = np.clongdouble
self.rdt = np.longdouble
self.rtol = 1e-10
self.atol = 1e-10
| TestLongDoubleIFFT |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.