language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/2900-2999/2927.Distribute Candies Among Children III/Solution.py | {
"start": 0,
"end": 316
} | class ____:
def distributeCandies(self, n: int, limit: int) -> int:
if n > 3 * limit:
return 0
ans = comb(n + 2, 2)
if n > limit:
ans -= 3 * comb(n - limit + 1, 2)
if n - 2 >= 2 * limit:
ans += 3 * comb(n - 2 * limit, 2)
return ans
| Solution |
python | doocs__leetcode | solution/0200-0299/0286.Walls and Gates/Solution.py | {
"start": 0,
"end": 692
} | class ____:
def wallsAndGates(self, rooms: List[List[int]]) -> None:
"""
Do not return anything, modify rooms in-place instead.
"""
m, n = len(rooms), len(rooms[0])
inf = 2**31 - 1
q = deque([(i, j) for i in range(m) for j in range(n) if rooms[i][j] == 0])
d = 0
while q:
d += 1
for _ in range(len(q)):
i, j = q.popleft()
for a, b in [[0, 1], [0, -1], [1, 0], [-1, 0]]:
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and rooms[x][y] == inf:
rooms[x][y] = d
q.append((x, y))
| Solution |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/sensors/test_data_factory.py | {
"start": 5733,
"end": 7423
} | class ____:
RUN_ID = "7f8c6c72-c093-11ec-a83d-0242ac120007"
SENSOR = AzureDataFactoryPipelineRunStatusSensor(
task_id="pipeline_run_sensor_async",
run_id=RUN_ID,
resource_group_name="resource-group-name",
factory_name="factory-name",
deferrable=True,
)
@mock.patch("airflow.providers.microsoft.azure.sensors.data_factory.AzureDataFactoryHook")
def test_adf_pipeline_status_sensor_async(self, mock_hook):
"""Assert execute method defer for Azure Data factory pipeline run status sensor"""
mock_hook.return_value.get_pipeline_run_status.return_value = AzureDataFactoryPipelineRunStatus.QUEUED
with pytest.raises(TaskDeferred) as exc:
self.SENSOR.execute({})
assert isinstance(exc.value.trigger, ADFPipelineRunStatusSensorTrigger), (
"Trigger is not a ADFPipelineRunStatusSensorTrigger"
)
def test_adf_pipeline_status_sensor_execute_complete_success(self):
"""Assert execute_complete log success message when trigger fire with target status"""
msg = f"Pipeline run {self.RUN_ID} has been succeeded."
with mock.patch.object(self.SENSOR.log, "info") as mock_log_info:
self.SENSOR.execute_complete(context={}, event={"status": "success", "message": msg})
mock_log_info.assert_called_with(msg)
def test_adf_pipeline_status_sensor_execute_complete_failure(
self,
):
"""Assert execute_complete method fail"""
with pytest.raises(AirflowException):
self.SENSOR.execute_complete(context={}, event={"status": "error", "message": ""})
| TestAzureDataFactoryPipelineRunStatusSensorWithAsync |
python | ethereum__web3.py | web3/_utils/module_testing/go_ethereum_txpool_module.py | {
"start": 95,
"end": 760
} | class ____:
@pytest.mark.asyncio
async def test_async_geth_txpool_inspect(self, async_w3: "AsyncWeb3[Any]") -> None:
test_data = await async_w3.geth.txpool.inspect()
assert "pending" in test_data
@pytest.mark.asyncio
async def test_async_geth_txpool_content(self, async_w3: "AsyncWeb3[Any]") -> None:
test_data = await async_w3.geth.txpool.content()
assert "pending" in test_data
@pytest.mark.asyncio
async def test_async_geth_txpool_status(self, async_w3: "AsyncWeb3[Any]") -> None:
test_data = await async_w3.geth.txpool.status()
assert "pending" in test_data
| GoEthereumAsyncTxPoolModuleTest |
python | scipy__scipy | scipy/integrate/_quadpack_py.py | {
"start": 52359,
"end": 52660
} | class ____:
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
| _RangeFunc |
python | pydata__xarray | xarray/computation/weighted.py | {
"start": 18322,
"end": 18620
} | class ____(Weighted["DataArray"]):
def _implementation(self, func, dim, **kwargs) -> DataArray:
self._check_dim(dim)
dataset = self.obj._to_temp_dataset()
dataset = dataset.map(func, dim=dim, **kwargs)
return self.obj._from_temp_dataset(dataset)
| DataArrayWeighted |
python | kamyu104__LeetCode-Solutions | Python/majority-element-ii.py | {
"start": 50,
"end": 1460
} | class ____(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
k, n, cnts = 3, len(nums), collections.defaultdict(int)
for i in nums:
cnts[i] += 1
# Detecting k items in cnts, at least one of them must have exactly
# one in it. We will discard those k items by one for each.
# This action keeps the same mojority numbers in the remaining numbers.
# Because if x / n > 1 / k is true, then (x - 1) / (n - k) > 1 / k is also true.
if len(cnts) == k:
for j in cnts.keys():
cnts[j] -= 1
if cnts[j] == 0:
del cnts[j]
# Resets cnts for the following counting.
for i in cnts.keys():
cnts[i] = 0
# Counts the occurrence of each candidate integer.
for i in nums:
if i in cnts:
cnts[i] += 1
# Selects the integer which occurs > [n / k] times.
result = []
for i in cnts.keys():
if cnts[i] > n / k:
result.append(i)
return result
def majorityElement2(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
return [i[0] for i in collections.Counter(nums).items() if i[1] > len(nums) / 3]
| Solution |
python | redis__redis-py | tests/test_asyncio/test_encoding.py | {
"start": 135,
"end": 2470
} | class ____:
@pytest_asyncio.fixture()
async def r(self, create_redis):
redis = await create_redis(decode_responses=True)
yield redis
await redis.flushall()
@pytest_asyncio.fixture()
async def r_no_decode(self, create_redis):
redis = await create_redis(decode_responses=False)
yield redis
await redis.flushall()
async def test_simple_encoding(self, r_no_decode: redis.Redis):
unicode_string = chr(3456) + "abcd" + chr(3421)
await r_no_decode.set("unicode-string", unicode_string.encode("utf-8"))
cached_val = await r_no_decode.get("unicode-string")
assert isinstance(cached_val, bytes)
assert unicode_string == cached_val.decode("utf-8")
async def test_simple_encoding_and_decoding(self, r: redis.Redis):
unicode_string = chr(3456) + "abcd" + chr(3421)
await r.set("unicode-string", unicode_string)
cached_val = await r.get("unicode-string")
assert isinstance(cached_val, str)
assert unicode_string == cached_val
async def test_memoryview_encoding(self, r_no_decode: redis.Redis):
unicode_string = chr(3456) + "abcd" + chr(3421)
unicode_string_view = memoryview(unicode_string.encode("utf-8"))
await r_no_decode.set("unicode-string-memoryview", unicode_string_view)
cached_val = await r_no_decode.get("unicode-string-memoryview")
# The cached value won't be a memoryview because it's a copy from Redis
assert isinstance(cached_val, bytes)
assert unicode_string == cached_val.decode("utf-8")
async def test_memoryview_encoding_and_decoding(self, r: redis.Redis):
unicode_string = chr(3456) + "abcd" + chr(3421)
unicode_string_view = memoryview(unicode_string.encode("utf-8"))
await r.set("unicode-string-memoryview", unicode_string_view)
cached_val = await r.get("unicode-string-memoryview")
assert isinstance(cached_val, str)
assert unicode_string == cached_val
async def test_list_encoding(self, r: redis.Redis):
unicode_string = chr(3456) + "abcd" + chr(3421)
result = [unicode_string, unicode_string, unicode_string]
await r.rpush("a", *result)
assert await r.lrange("a", 0, -1) == result
@pytest.mark.onlynoncluster
| TestEncoding |
python | mlflow__mlflow | dev/clint/tests/rules/test_redundant_test_docstring.py | {
"start": 2637,
"end": 3119
} | class ____:
"""Test class."""
pass
'''
config = Config(select={RedundantTestDocstring.name})
violations = lint_file(Path("module_test.py"), code, config, index_path)
assert len(violations) == 2
def test_multiline_docstrings_are_always_allowed(index_path: Path) -> None:
code = '''def test_with_multiline():
"""
Multi-line.
"""
assert True
def test_with_multiline_compact():
"""Line 1
Line 2"""
assert True
| TestClassImplementation |
python | allegroai__clearml | clearml/logger.py | {
"start": 1141,
"end": 65092
} | class ____(object):
"""
The ``Logger`` class is the ClearML console log and metric statistics interface, and contains methods for explicit
reporting.
Explicit reporting extends ClearML automagical capturing of inputs and output. Explicit reporting
methods include scalar plots, line plots, histograms, confusion matrices, 2D and 3D scatter
diagrams, text logging, tables, and image uploading and reporting.
In the ClearML Web-App (UI), ``Logger`` output appears in CONSOLE, SCALARS,
PLOTS, and DEBUG SAMPLES tabs. When you compare experiments, ``Logger`` output appears in the
comparisons.
.. warning::
Do not construct Logger objects directly.
You must get a Logger object before calling any of the other ``Logger`` class methods by calling
``Task.get_logger`` or ``Logger.current_logger``.
"""
SeriesInfo = SeriesInfo
_tensorboard_logging_auto_group_scalars = False
_tensorboard_single_series_per_graph = deferred_config("metrics.tensorboard_single_series_per_graph", False)
def __init__(
self,
private_task: _Task,
connect_stdout: bool = True,
connect_stderr: bool = True,
connect_logging: bool = False,
) -> None:
"""
.. warning::
**Do not construct Logger manually!**
Please use :meth:`Logger.get_current`
"""
assert isinstance(
private_task, _Task
), "Logger object cannot be instantiated externally, use Logger.current_logger()"
super(Logger, self).__init__()
self._task = private_task
self._default_upload_destination = None
self._flusher = None
self._report_worker = None
self._graph_titles = {}
self._tensorboard_series_force_prefix = None
self._task_handler = (
TaskHandler(task=self._task, capacity=100)
if private_task.is_main_task() or (connect_stdout or connect_stderr or connect_logging)
else None
)
self._connect_std_streams = connect_stdout or connect_stderr
self._connect_logging = connect_logging
self._default_max_sample_history = None
# Make sure urllib is never in debug/info,
disable_urllib3_info = config.get("log.disable_urllib3_info", True)
if disable_urllib3_info and logging.getLogger("urllib3").isEnabledFor(logging.INFO):
logging.getLogger("urllib3").setLevel(logging.WARNING)
if self._task.is_main_task():
StdStreamPatch.patch_std_streams(self, connect_stdout=connect_stdout, connect_stderr=connect_stderr)
if self._connect_logging:
StdStreamPatch.patch_logging_formatter(self)
elif not self._connect_std_streams and self._task.is_main_task():
# make sure that at least the main clearml logger is connect
base_logger = LoggerRoot.get_base_logger()
if base_logger and base_logger.handlers:
StdStreamPatch.patch_logging_formatter(self, base_logger.handlers[0])
@classmethod
def current_logger(cls) -> "Logger":
"""
Get the Logger object for the main execution Task, the current running Task, if one exists. If no Logger object
exists, this method creates one and returns it. Therefore, you can call this method from anywhere
in the code.
.. code-block:: py
logger = Logger.current_logger()
:return: The Logger object (a singleton) for the current running Task.
"""
from .task import Task
task = Task.current_task()
if not task:
return None
return task.get_logger()
def report_text(
self,
msg: str,
level: int = logging.INFO,
print_console: bool = True,
*args: Any,
**_: Any,
) -> None:
"""
For explicit reporting, print text to the log. Optionally, print a log level and print to the console.
For example:
.. code-block:: py
logger.report_text('log some text', level=logging.DEBUG, print_console=False)
You can view the reported text in the **ClearML Web-App (UI)**, **RESULTS** tab, **CONSOLE** sub-tab.
:param str msg: The text to log.
:param int level: The log level from the Python ``logging`` package. The default value is ``logging.INFO``.
:param bool print_console: In addition to the log, print to the console.
The values are:
- ``True`` - Print to the console. (default)
- ``False`` - Do not print to the console.
"""
force_send = not print_console and self._parse_level(level) >= logging.WARNING
return self._console(msg, level, not print_console, force_send=force_send, *args, **_)
def report_scalar(self, title: str, series: str, value: float, iteration: int) -> None:
"""
For explicit reporting, plot a scalar series.
For example, plot a scalar series:
.. code-block:: py
logger = Logger.current_logger()
scalar_series = [random.randint(0,10) for i in range(10)]
for iteration in range(10):
logger.report_scalar(
title='scalar metrics', series='series', value=scalar_series[iteration], iteration=iteration
)
You can view the scalar plots in the **ClearML Web-App (UI)**, **RESULTS** tab, **SCALARS** sub-tab.
:param str title: The title (metric) of the plot. Plot more than one scalar series on the same plot by using
the same ``title`` for each call to this method.
:param str series: The series name (variant) of the reported scalar.
:param float value: The value to plot per iteration.
:param int iteration: The reported iteration / step (x-axis of the reported time series)
"""
# if task was not started, we have to start it
self._start_task_if_needed()
self._touch_title_series(title, series)
# noinspection PyProtectedMember
return self._task._reporter.report_scalar(title=title, series=series, value=float(value), iter=iteration)
def report_single_value(self, name: str, value: float) -> None:
"""
Reports a single value metric (for example, total experiment accuracy or mAP)
You can view the metrics in the **ClearML Web-App (UI)**, **RESULTS** tab, **SCALARS** sub-tab.
:param name: Metric's name
:param value: Metric's value
"""
return self.report_scalar(title="Summary", series=name, value=value, iteration=-(2**31))
def report_vector(
self,
title: str,
series: str,
values: Sequence[Union[int, float]],
iteration: Optional[int] = None,
labels: Optional[List[str]] = None,
xlabels: Optional[List[str]] = None,
xaxis: Optional[str] = None,
yaxis: Optional[str] = None,
mode: Optional[str] = None,
extra_layout: Optional[dict] = None,
) -> None:
"""
For explicit reporting, plot a vector as (default stacked) histogram.
.. note::
This method is the same as :meth:`Logger.report_histogram`.
This method is deprecated, use :meth:`Logger.report_histogram` instead.
For example:
.. code-block:: py
vector_series = np.random.randint(10, size=10).reshape(2,5)
logger.report_vector(title='vector example', series='vector series', values=vector_series, iteration=0,
labels=['A','B'], xaxis='X axis label', yaxis='Y axis label')
You can view the vectors plot in the **ClearML Web-App (UI)**, **RESULTS** tab, **PLOTS** sub-tab.
:param title: The title (metric) of the plot.
:param series: The series name (variant) of the reported histogram.
:param values: The series values. A list of floats, or an N-dimensional Numpy array containing
data for each histogram bar.
:param iteration: The reported iteration / step. Each ``iteration`` creates another plot.
:param labels: Labels for each bar group, creating a plot legend labeling each series. (Optional)
:param xlabels: Labels per entry in each bucket in the histogram (vector), creating a set of labels
for each histogram bar on the x-axis. (Optional)
:param xaxis: The x-axis title. (Optional)
:param yaxis: The y-axis title. (Optional)
:param mode: Multiple histograms mode, stack / group / relative. Default is 'group'.
:param extra_layout: Optional dictionary for layout configuration, passed directly to plotly.
See full details on the supported configuration: https://plotly.com/javascript/reference/layout/
example: ``extra_layout={'showlegend': False, 'plot_bgcolor': 'yellow'}``
"""
warnings.warn(
":meth:`Logger.report_vector` is deprecated; use :meth:`Logger.report_histogram` instead.",
DeprecationWarning,
)
self._touch_title_series(title, series)
return self.report_histogram(
title,
series,
values,
iteration or 0,
labels=labels,
xlabels=xlabels,
xaxis=xaxis,
yaxis=yaxis,
mode=mode,
extra_layout=extra_layout,
)
def report_histogram(
self,
title: str,
series: str,
values: Sequence[Union[int, float]],
iteration: Optional[int] = None,
labels: Optional[List[str]] = None,
xlabels: Optional[List[str]] = None,
xaxis: Optional[str] = None,
yaxis: Optional[str] = None,
mode: Optional[str] = None,
data_args: Optional[dict] = None,
extra_layout: Optional[dict] = None,
) -> None:
"""
For explicit reporting, plot a (default grouped) histogram.
Notice this function will not calculate the histogram,
it assumes the histogram was already calculated in `values`
For example:
.. code-block:: py
vector_series = np.random.randint(10, size=10).reshape(2,5)
logger.report_histogram(title='histogram example', series='histogram series',
values=vector_series, iteration=0, labels=['A','B'], xaxis='X axis label', yaxis='Y axis label')
You can view the reported histograms in the **ClearML Web-App (UI)**, **RESULTS** tab, **PLOTS** sub-tab.
:param title: The title (metric) of the plot.
:param series: The series name (variant) of the reported histogram.
:param values: The series values. A list of floats, or an N-dimensional Numpy array containing
data for each histogram bar.
:param iteration: The reported iteration / step. Each ``iteration`` creates another plot.
:param labels: Labels for each bar group, creating a plot legend labeling each series. (Optional)
:param xlabels: Labels per entry in each bucket in the histogram (vector), creating a set of labels
for each histogram bar on the x-axis. (Optional)
:param xaxis: The x-axis title. (Optional)
:param yaxis: The y-axis title. (Optional)
:param mode: Multiple histograms mode, stack / group / relative. Default is 'group'.
:param data_args: optional dictionary for data configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/bar/
example: ``data_args={'orientation': 'h', 'marker': {'color': 'blue'}}``
:param extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/bar/
example: ``extra_layout={'xaxis': {'type': 'date', 'range': ['2020-01-01', '2020-01-31']}}``
"""
if not isinstance(values, np.ndarray):
values = np.array(values)
# if task was not started, we have to start it
self._start_task_if_needed()
self._touch_title_series(title, series)
# noinspection PyProtectedMember
return self._task._reporter.report_histogram(
title=title,
series=series,
histogram=values,
iter=iteration or 0,
labels=labels,
xlabels=xlabels,
xtitle=xaxis,
ytitle=yaxis,
mode=mode or "group",
data_args=data_args,
layout_config=extra_layout,
)
def report_table(
self,
title: str,
series: str,
iteration: Optional[int] = None,
table_plot: Optional[Union["pd.DataFrame", Sequence[Sequence]]] = None,
csv: Optional[str] = None,
url: Optional[str] = None,
extra_layout: Optional[dict] = None,
extra_data: Optional[dict] = None,
) -> None:
"""
For explicit reporting, report a table plot.
One and only one of the following parameters must be provided.
- ``table_plot`` - Pandas DataFrame or Table as list of rows (list)
- ``csv`` - CSV file
- ``url`` - URL to CSV file
For example:
.. code-block:: py
df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
'num_wings': [2, 0, 0, 0],
'num_specimen_seen': [10, 2, 1, 8]},
index=['falcon', 'dog', 'spider', 'fish'])
logger.report_table(title='table example',series='pandas DataFrame',iteration=0,table_plot=df)
You can view the reported tables in the **ClearML Web-App (UI)**, **RESULTS** tab, **PLOTS** sub-tab.
:param title: The title (metric) of the table.
:param series: The series name (variant) of the reported table.
:param iteration: The reported iteration / step.
:param table_plot: The output table plot object
:param csv: path to local csv file
:param url: A URL to the location of csv file.
:param extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/layout/
For example:
.. code-block:: py
logger.report_table(
title='table example',
series='pandas DataFrame',
iteration=0,
table_plot=df,
extra_layout={'height': 600}
)
:param extra_data: optional dictionary for data configuration, like column width, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/table/
For example:
.. code-block:: py
logger.report_table(
title='table example',
series='pandas DataFrame',
iteration=0,
table_plot=df,
extra_data={'columnwidth': [2., 1., 1., 1.]}
)
"""
mutually_exclusive(UsageError, _check_none=True, table_plot=table_plot, csv=csv, url=url)
table = table_plot
if url or csv:
if not pd:
raise UsageError(
"pandas is required in order to support reporting tables using CSV or a URL, "
"please install the pandas python package"
)
if url:
table = pd.read_csv(url, index_col=[0])
elif csv:
table = pd.read_csv(csv, index_col=[0])
def replace(dst: Any, *srcs: Any) -> None:
for src in srcs:
reporter_table.replace(src, dst, inplace=True)
if isinstance(table, (list, tuple)):
reporter_table = table
else:
reporter_table = table.fillna(str(np.nan))
replace("NaN", np.nan, math.nan if six.PY3 else float("nan"))
replace("Inf", np.inf, math.inf if six.PY3 else float("inf"))
minus_inf = [-np.inf, -math.inf if six.PY3 else -float("inf")]
try:
minus_inf.append(np.NINF)
except AttributeError:
# NINF has been removed in numpy>2.0
pass
replace("-Inf", *minus_inf)
# noinspection PyProtectedMember
return self._task._reporter.report_table(
title=title,
series=series,
table=reporter_table,
iteration=iteration or 0,
layout_config=extra_layout,
data_config=extra_data,
)
def report_line_plot(
self,
title: str,
series: Sequence[SeriesInfo],
xaxis: str,
yaxis: str,
mode: str = "lines",
iteration: Optional[int] = None,
reverse_xaxis: bool = False,
comment: Optional[str] = None,
extra_layout: Optional[dict] = None,
) -> None:
"""
For explicit reporting, plot one or more series as lines.
:param str title: The title (metric) of the plot.
:param list series: All the series data, one list element for each line in the plot.
:param int iteration: The reported iteration / step.
:param str xaxis: The x-axis title. (Optional)
:param str yaxis: The y-axis title. (Optional)
:param str mode: The type of line plot.
The values are:
- ``lines`` (default)
- ``markers``
- ``lines+markers``
:param bool reverse_xaxis: Reverse the x-axis.
The values are:
- ``True`` - The x-axis is high to low (reversed).
- ``False`` - The x-axis is low to high (not reversed). (default)
:param str comment: A comment displayed with the plot, underneath the title.
:param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/scatter/
example: ``extra_layout={'xaxis': {'type': 'date', 'range': ['2020-01-01', '2020-01-31']}}``
.. note::
This method is the same as :meth:`Logger.report_scatter2d` with :param:`mode='lines'`.
This method is deprecated, use :meth:`Logger.report_scatter2d` instead.
"""
warnings.warn(
":meth:`Logger.report_line_plot` is deprecated;"
"use :meth:`Logger.report_scatter2d` instead, e.g., with :param:`mode='lines'`.",
DeprecationWarning,
)
# noinspection PyArgumentList
series = [self.SeriesInfo(**s) if isinstance(s, dict) else s for s in series]
# if task was not started, we have to start it
self._start_task_if_needed()
self._touch_title_series(title, series[0].name if series else "")
# noinspection PyProtectedMember
return self._task._reporter.report_line_plot(
title=title,
series=series,
iter=iteration or 0,
xtitle=xaxis,
ytitle=yaxis,
mode=mode,
reverse_xaxis=reverse_xaxis,
comment=comment,
layout_config=extra_layout,
)
def report_scatter2d(
self,
title: str,
series: str,
scatter: Union[Sequence[Tuple[float, float]], np.ndarray],
iteration: Optional[int] = None,
xaxis: Optional[str] = None,
yaxis: Optional[str] = None,
labels: Optional[List[str]] = None,
mode: str = "lines",
comment: Optional[str] = None,
extra_layout: Optional[dict] = None,
) -> None:
"""
For explicit reporting, report a 2d scatter plot.
For example:
.. code-block:: py
scatter2d = np.hstack((np.atleast_2d(np.arange(0, 10)).T, np.random.randint(10, size=(10, 1))))
logger.report_scatter2d(title="example_scatter", series="series", iteration=0, scatter=scatter2d,
xaxis="title x", yaxis="title y")
Plot multiple 2D scatter series on the same plot by passing the same ``title`` and ``iteration`` values
to this method:
.. code-block:: py
scatter2d_1 = np.hstack((np.atleast_2d(np.arange(0, 10)).T, np.random.randint(10, size=(10, 1))))
logger.report_scatter2d(title="example_scatter", series="series_1", iteration=1, scatter=scatter2d_1,
xaxis="title x", yaxis="title y")
scatter2d_2 = np.hstack((np.atleast_2d(np.arange(0, 10)).T, np.random.randint(10, size=(10, 1))))
logger.report_scatter2d("example_scatter", "series_2", iteration=1, scatter=scatter2d_2,
xaxis="title x", yaxis="title y")
:param str title: The title (metric) of the plot.
:param str series: The series name (variant) of the reported scatter plot.
:param list scatter: The scatter data. numpy.ndarray or list of (pairs of x,y) scatter:
:param int iteration: The reported iteration / step.
:param str xaxis: The x-axis title. (Optional)
:param str yaxis: The y-axis title. (Optional)
:param list(str) labels: Labels per point in the data assigned to the ``scatter`` parameter. The labels must be
in the same order as the data.
:param str mode: The type of scatter plot. The values are:
- ``lines``
- ``markers``
- ``lines+markers``
:param str comment: A comment displayed with the plot, underneath the title.
:param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/scatter/
example: ``extra_layout={'xaxis': {'type': 'date', 'range': ['2020-01-01', '2020-01-31']}}``
"""
if not isinstance(scatter, np.ndarray):
if not isinstance(scatter, list):
scatter = list(scatter)
scatter = np.array(scatter)
# if task was not started, we have to start it
self._start_task_if_needed()
self._touch_title_series(title, series)
# noinspection PyProtectedMember
return self._task._reporter.report_2d_scatter(
title=title,
series=series,
data=scatter,
iter=iteration or 0,
mode=mode,
xtitle=xaxis,
ytitle=yaxis,
labels=labels,
comment=comment,
layout_config=extra_layout,
)
def report_scatter3d(
self,
title: str,
series: str,
scatter: Union[Sequence[Tuple[float, float, float]], np.ndarray],
iteration: Optional[int] = None,
xaxis: Optional[str] = None,
yaxis: Optional[str] = None,
zaxis: Optional[str] = None,
labels: Optional[List[str]] = None,
mode: str = "markers",
fill: bool = False,
comment: Optional[str] = None,
extra_layout: Optional[dict] = None,
) -> None:
"""
For explicit reporting, plot a 3d scatter graph (with markers).
:param str title: The title (metric) of the plot.
:param str series: The series name (variant) of the reported scatter plot.
:param scatter: The scatter data.
list of (pairs of x,y,z), list of series [[(x1,y1,z1)...]], or numpy.ndarray
:param int iteration: The reported iteration / step.
:param str xaxis: The x-axis title. (Optional)
:param str yaxis: The y-axis title. (Optional)
:param str zaxis: The z-axis title. (Optional)
:param list(str) labels: Labels per point in the data assigned to the ``scatter`` parameter. The labels must be
in the same order as the data.
:param str mode: The type of scatter plot. The values are: ``lines``, ``markers``, ``lines+markers``.
For example:
.. code-block:: py
scatter3d = np.random.randint(10, size=(10, 3))
logger.report_scatter3d(title="example_scatter_3d", series="series_xyz", iteration=1, scatter=scatter3d,
xaxis="title x", yaxis="title y", zaxis="title z")
:param bool fill: Fill the area under the curve. The values are:
- ``True`` - Fill
- ``False`` - Do not fill (default)
:param str comment: A comment displayed with the plot, underneath the title.
:param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/scatter3d/
example: ``extra_layout={'xaxis': {'type': 'date', 'range': ['2020-01-01', '2020-01-31']}}``
"""
# check if multiple series
multi_series = isinstance(scatter, list) and (
isinstance(scatter[0], np.ndarray)
or (scatter[0] and isinstance(scatter[0], list) and isinstance(scatter[0][0], list))
)
if not multi_series:
if not isinstance(scatter, np.ndarray):
if not isinstance(scatter, list):
scatter = list(scatter)
scatter = np.array(scatter)
try:
scatter = scatter.astype(np.float32)
except ValueError:
pass
# if task was not started, we have to start it
self._start_task_if_needed()
self._touch_title_series(title, series)
# noinspection PyProtectedMember
return self._task._reporter.report_3d_scatter(
title=title,
series=series,
data=scatter,
iter=iteration or 0,
labels=labels,
mode=mode,
fill=fill,
comment=comment,
xtitle=xaxis,
ytitle=yaxis,
ztitle=zaxis,
layout_config=extra_layout,
)
def report_confusion_matrix(
self,
title: str,
series: str,
matrix: np.ndarray,
iteration: Optional[int] = None,
xaxis: Optional[str] = None,
yaxis: Optional[str] = None,
xlabels: Optional[List[str]] = None,
ylabels: Optional[List[str]] = None,
yaxis_reversed: bool = False,
comment: Optional[str] = None,
extra_layout: Optional[dict] = None,
) -> None:
"""
For explicit reporting, plot a heat-map matrix.
For example:
.. code-block:: py
confusion = np.random.randint(10, size=(10, 10))
logger.report_confusion_matrix("example confusion matrix", "ignored", iteration=1, matrix=confusion,
xaxis="title X", yaxis="title Y")
:param str title: The title (metric) of the plot.
:param str series: The series name (variant) of the reported confusion matrix.
:param numpy.ndarray matrix: A heat-map matrix (example: confusion matrix)
:param int iteration: The reported iteration / step.
:param str xaxis: The x-axis title. (Optional)
:param str yaxis: The y-axis title. (Optional)
:param list(str) xlabels: Labels for each column of the matrix. (Optional)
:param list(str) ylabels: Labels for each row of the matrix. (Optional)
:param bool yaxis_reversed: If False 0,0 is at the bottom left corner. If True, 0,0 is at the top left corner
:param str comment: A comment displayed with the plot, underneath the title.
:param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
See full details on the supported configuration: https://plotly.com/javascript/reference/heatmap/
example: ``extra_layout={'xaxis': {'type': 'date', 'range': ['2020-01-01', '2020-01-31']}}``
"""
if not isinstance(matrix, np.ndarray):
matrix = np.array(matrix)
if extra_layout is None:
extra_layout = {"texttemplate": "%{z}"}
# if task was not started, we have to start it
self._start_task_if_needed()
self._touch_title_series(title, series)
# noinspection PyProtectedMember
return self._task._reporter.report_value_matrix(
title=title,
series=series,
data=matrix.astype(np.float32),
iter=iteration or 0,
xtitle=xaxis,
ytitle=yaxis,
xlabels=xlabels,
ylabels=ylabels,
yaxis_reversed=yaxis_reversed,
comment=comment,
layout_config=extra_layout,
)
def report_matrix(
    self,
    title: str,
    series: str,
    matrix: np.ndarray,
    iteration: Optional[int] = None,
    xaxis: Optional[str] = None,
    yaxis: Optional[str] = None,
    xlabels: Optional[List[str]] = None,
    ylabels: Optional[List[str]] = None,
    yaxis_reversed: bool = False,
    extra_layout: Optional[dict] = None,
) -> None:
    """
    Deprecated alias of :meth:`Logger.report_confusion_matrix` - plot a heat-map matrix.

    .. note::
        This method is deprecated; use :meth:`Logger.report_confusion_matrix` instead.

    :param str title: The title (metric) of the plot.
    :param str series: The series name (variant) of the reported confusion matrix.
    :param numpy.ndarray matrix: A heat-map matrix (example: confusion matrix)
    :param int iteration: The reported iteration / step.
    :param str xaxis: The x-axis title. (Optional)
    :param str yaxis: The y-axis title. (Optional)
    :param list(str) xlabels: Labels for each column of the matrix. (Optional)
    :param list(str) ylabels: Labels for each row of the matrix. (Optional)
    :param bool yaxis_reversed: If False, 0,0 is in the bottom left corner. If True, 0,0 is in the top left corner
    :param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
        See full details on the supported configuration: https://plotly.com/javascript/reference/heatmap/
    """
    # Emit a runtime signal so callers migrate to the canonical API.
    warnings.warn(
        ":meth:`Logger.report_matrix` is deprecated; use :meth:`Logger.report_confusion_matrix` instead.",
        DeprecationWarning,
    )
    self._touch_title_series(title, series)
    # Forward everything to the canonical implementation.
    forwarded = dict(
        xaxis=xaxis,
        yaxis=yaxis,
        xlabels=xlabels,
        ylabels=ylabels,
        yaxis_reversed=yaxis_reversed,
        extra_layout=extra_layout,
    )
    return self.report_confusion_matrix(title, series, matrix, iteration or 0, **forwarded)
def report_surface(
    self,
    title: str,
    series: str,
    matrix: np.ndarray,
    iteration: Optional[int] = None,
    xaxis: Optional[str] = None,
    yaxis: Optional[str] = None,
    zaxis: Optional[str] = None,
    xlabels: Optional[List[str]] = None,
    ylabels: Optional[List[str]] = None,
    camera: Optional[Sequence[float]] = None,
    comment: Optional[str] = None,
    extra_layout: Optional[dict] = None,
) -> None:
    """
    For explicit reporting, report a 3d surface plot.

    .. note::
        This method plots the same data as :meth:`Logger.report_confusion_matrix`, but presents the
        data as a surface diagram not a confusion matrix.

    :param str title: The title (metric) of the plot.
    :param str series: The series name (variant) of the reported surface.
    :param numpy.ndarray matrix: A heat-map matrix (example: confusion matrix)
    :param int iteration: The reported iteration / step.
    :param str xaxis: The x-axis title. (Optional)
    :param str yaxis: The y-axis title. (Optional)
    :param str zaxis: The z-axis title. (Optional)
    :param list(str) xlabels: Labels for each column of the matrix. (Optional)
    :param list(str) ylabels: Labels for each row of the matrix. (Optional)
    :param list(float) camera: X,Y,Z coordinates indicating the camera position. The default value is ``(1,1,1)``.
    :param str comment: A comment displayed with the plot, underneath the title.
    :param dict extra_layout: optional dictionary for layout configuration, passed directly to plotly
        See full details on the supported configuration: https://plotly.com/javascript/reference/surface/
    """
    # Coerce list-like input to an ndarray; existing arrays are used as-is.
    data = matrix if isinstance(matrix, np.ndarray) else np.array(matrix)
    # if task was not started, we have to start it
    self._start_task_if_needed()
    self._touch_title_series(title, series)
    # noinspection PyProtectedMember
    reporter = self._task._reporter
    return reporter.report_value_surface(
        title=title,
        series=series,
        data=data.astype(np.float32),
        iter=iteration or 0,
        xtitle=xaxis,
        ytitle=yaxis,
        ztitle=zaxis,
        xlabels=xlabels,
        ylabels=ylabels,
        camera=camera,
        comment=comment,
        layout_config=extra_layout,
    )
def report_image(
    self,
    title: str,
    series: str,
    iteration: Optional[int] = None,
    local_path: Optional[str] = None,
    image: Optional[Union[np.ndarray, Image.Image]] = None,
    matrix: Optional[np.ndarray] = None,
    max_image_history: Optional[int] = None,
    delete_after_upload: bool = False,
    url: Optional[str] = None,
) -> None:
    """
    For explicit reporting, report an image and upload its contents.

    This method uploads the image to a preconfigured bucket (see :meth:`Logger.set_default_upload_destination`)
    describing the task ID, title, series and iteration.

    One and only one of the following parameters must be provided.

    - ``local_path``
    - ``url``
    - ``image``
    - ``matrix``

    :param title: The title (metric) of the image.
    :param series: The series name (variant) of the reported image.
    :param iteration: The reported iteration / step.
    :param local_path: A path to an image file.
    :param url: A URL for the location of a pre-uploaded image.
    :param image: Image data (RGB).
    :param matrix: Deprecated, Image data (RGB).

        .. note::
           The ``matrix`` parameter is deprecated. Use the ``image`` parameters.
    :param max_image_history: The maximum number of images to store per metric/variant combination.
        For an unlimited number, use a negative value. The default value is set in global configuration
        (default=``5``).
    :param delete_after_upload: After the upload, delete the local copy of the image. The values are:

      - ``True`` - Delete after upload.
      - ``False`` - Do not delete after upload. (default)
    """
    # Enforce the "exactly one source" contract; empty strings are normalized to
    # None so they do not count as a provided source.
    mutually_exclusive(
        UsageError,
        _check_none=True,
        local_path=local_path or None,
        url=url or None,
        image=image,
        matrix=matrix,
    )
    # Back-compat path: fold the deprecated `matrix` argument into `image`.
    if matrix is not None:
        warnings.warn(
            "'matrix' variable is deprecated; use 'image' instead.",
            DeprecationWarning,
        )
    if image is None:
        image = matrix
    if image is not None and not isinstance(image, (np.ndarray, Image.Image)):
        raise ValueError("Supported 'image' types are: numpy.ndarray or PIL.Image")
    # if task was not started, we have to start it
    self._start_task_if_needed()
    self._touch_title_series(title, series)
    if url:
        # Pre-uploaded image: only register the link, nothing to upload.
        # noinspection PyProtectedMember
        self._task._reporter.report_image(
            title=title,
            series=series,
            src=url,
            iter=iteration or 0,
        )
    else:
        # Resolve the upload destination; fall back to a local cache folder
        # when no default destination was configured.
        upload_uri = self.get_default_upload_destination()
        if not upload_uri:
            upload_uri = Path(get_cache_dir()) / "debug_images"
            upload_uri.mkdir(parents=True, exist_ok=True)
            # Verify that we can upload to this destination
            upload_uri = str(upload_uri)
            storage = StorageHelper.get(upload_uri)
            upload_uri = storage.verify_upload(folder_uri=upload_uri)
        if isinstance(image, Image.Image):
            # The reporter works with raw arrays; convert PIL images up front.
            image = np.array(image)  # noqa
        # noinspection PyProtectedMember
        self._task._reporter.report_image_and_upload(
            title=title,
            series=series,
            path=local_path,
            image=image,
            iter=iteration or 0,
            upload_uri=upload_uri,
            max_image_history=max_image_history
            if max_image_history is not None
            else self._default_max_sample_history,
            delete_after_upload=delete_after_upload,
        )
def report_media(
    self,
    title: str,
    series: str,
    iteration: Optional[int] = None,
    local_path: Optional[str] = None,
    stream: Optional[Union[six.BytesIO, six.StringIO]] = None,
    file_extension: Optional[str] = None,
    max_history: Optional[int] = None,
    delete_after_upload: bool = False,
    url: Optional[str] = None,
) -> None:
    """
    Report media upload its contents, including images, audio, and video.

    Media is uploaded to a preconfigured bucket (see setup_upload()) with a key (filename)
    describing the task ID, title, series and iteration.

    One and only one of the following parameters must be provided

    - ``local_path``
    - ``stream``
    - ``url``

    If you use ``stream`` for a BytesIO stream to upload, ``file_extension`` must be provided.

    :param str title: The title (metric) of the media.
    :param str series: The series name (variant) of the reported media.
    :param int iteration: The reported iteration / step.
    :param str local_path: A path to a media file.
    :param stream: BytesIO stream to upload. If provided, ``file_extension`` must also be provided.
    :param str url: A URL to the location of a pre-uploaded media.
    :param file_extension: A file extension to use when ``stream`` is passed.
    :param int max_history: The maximum number of media files to store per metric/variant combination.
        Use negative value for unlimited. Default is set in global configuration (default=5)
    :param bool delete_after_upload: After the file is uploaded, delete the local copy

      - ``True`` - Delete
      - ``False`` - Do not delete
    """
    # Enforce the "exactly one source" contract; empty strings normalize to None.
    mutually_exclusive(
        UsageError,
        _check_none=True,
        local_path=local_path or None,
        url=url or None,
        stream=stream,
    )
    if stream is not None and not file_extension:
        raise ValueError("No file extension provided for stream media upload")
    # if task was not started, we have to start it
    self._start_task_if_needed()
    self._touch_title_series(title, series)
    if url:
        # Pre-uploaded media: only register the link.
        # noinspection PyProtectedMember
        self._task._reporter.report_media(
            title=title,
            series=series,
            src=url,
            iter=iteration or 0,
        )
    else:
        # Resolve the upload destination; fall back to a local cache folder.
        # NOTE(review): the fallback folder is named "debug_images" even for
        # audio/video media - presumably intentional (shared cache); confirm.
        upload_uri = self.get_default_upload_destination()
        if not upload_uri:
            upload_uri = Path(get_cache_dir()) / "debug_images"
            upload_uri.mkdir(parents=True, exist_ok=True)
            # Verify that we can upload to this destination
            upload_uri = str(upload_uri)
            storage = StorageHelper.get(upload_uri)
            upload_uri = storage.verify_upload(folder_uri=upload_uri)
        # noinspection PyProtectedMember
        self._task._reporter.report_media_and_upload(
            title=title,
            series=series,
            path=local_path,
            stream=stream,
            iter=iteration or 0,
            upload_uri=upload_uri,
            max_history=max_history if max_history is not None else self._default_max_sample_history,
            delete_after_upload=delete_after_upload,
            file_extension=file_extension,
        )
def report_plotly(
    self,
    title: str,
    series: str,
    figure: Union[Dict, "Figure"],  # noqa: F821
    iteration: Optional[int] = None,
) -> None:
    """
    Report a ``Plotly`` figure (plot) directly.

    ``Plotly`` figure can be a ``plotly.graph_objs._figure.Figure`` or a dictionary as defined by ``plotly.js``

    :param str title: The title (metric) of the plot.
    :param str series: The series name (variant) of the reported plot.
    :param int iteration: The reported iteration / step.
    :param dict figure: A ``plotly`` Figure object or a ``plotly`` dictionary
    """
    # if task was not started, we have to start it
    self._start_task_if_needed()
    self._touch_title_series(title, series)
    # Accept either a ready-made dict or a Figure object.
    if isinstance(figure, dict):
        plot = figure
    else:
        plot = figure.to_plotly_json()
    # Best effort: stamp the series name as the plot title; tolerate any shape.
    # noinspection PyBroadException
    try:
        plot["layout"]["title"] = series
    except Exception:
        pass
    # noinspection PyProtectedMember
    self._task._reporter.report_plot(
        title=title,
        series=series,
        plot=plot,
        iter=iteration or 0,
    )
def report_matplotlib_figure(
    self,
    title: str,
    series: str,
    figure: Union["MatplotlibFigure", "pyplot"],
    iteration: Optional[int] = None,
    report_image: bool = False,
    report_interactive: bool = True,
) -> None:
    """
    Report a ``matplotlib`` figure / plot directly.

    ``matplotlib.figure.Figure`` / ``matplotlib.pyplot``

    :param str title: The title (metric) of the plot.
    :param str series: The series name (variant) of the reported plot.
    :param int iteration: The reported iteration / step.
    :param MatplotlibFigure figure: A ``matplotlib`` Figure object
    :param report_image: Default False. If True, the plot will be uploaded as a debug sample (png image),
        and will appear under the debug samples tab (instead of the Plots tab).
    :param report_interactive: If True (default), it will try to convert the matplotlib into interactive
        plot in the UI. If False, the matplotlib is saved as is and will
        be non-interactive (except zooming in/out)
    """
    # if task was not started, we have to start it
    self._start_task_if_needed()
    # Decide how the figure is stored:
    #   report_image=True                      -> static png debug sample
    #   otherwise, report_interactive=True     -> keep interactive (flag False)
    #   otherwise                              -> non-interactive image plot
    if report_image:
        save_as_image = "png"
    elif report_interactive:
        save_as_image = False
    else:
        save_as_image = True
    # noinspection PyProtectedMember
    self._task._reporter.report_matplotlib(
        title=title,
        series=series,
        figure=figure,
        iter=iteration or 0,
        logger=self,
        force_save_as_image=save_as_image,
    )
def set_default_upload_destination(self, uri: str) -> None:
    """
    Set the destination storage URI (for example, S3, Google Cloud Storage, a file path) for uploading debug images.

    The images are uploaded separately. A link to each image is reported.

    .. note::
        Credentials for the destination storage are specified in the ClearML configuration file,
        ``~/clearml.conf``.

    .. note::
        This method returns ``None`` (an earlier version of this docstring incorrectly
        described a boolean return). Unsupported/unreachable destinations presumably
        surface as exceptions from the storage helper - confirm against StorageHelper.

    :param str uri: example: 's3://bucket/directory/' or 'file:///tmp/debug/'
    """
    # Create the storage helper
    storage = StorageHelper.get(uri)
    # Verify that we can upload to this destination; the verified (possibly
    # normalized) URI replaces the caller-supplied one.
    uri = storage.verify_upload(folder_uri=uri)
    self._default_upload_destination = uri
def get_default_upload_destination(self) -> str:
    """
    Get the destination storage URI (for example, S3, Google Cloud Storage, a file path) for uploading debug images
    (see :meth:`Logger.set_default_upload_destination`).

    :return: The default upload destination URI. For example: ``s3://bucket/directory/``,
        or ``file:///tmp/debug/``. Falls back to the task's report storage URI when no
        explicit destination was set.
    """
    if self._default_upload_destination:
        return self._default_upload_destination
    # noinspection PyProtectedMember
    return self._task._get_default_report_storage_uri()
def flush(self, wait: bool = False) -> bool:
    """
    Flush cached reports and console outputs to backend.

    :param wait: Wait for all outstanding uploads and events to be sent (default False)

    :return: True, if successfully flushed the cache. False, if failed (no task attached).
    """
    self._flush_stdout_handler()
    # Without an attached task there is nothing to flush upstream.
    return self._task.flush(wait_for_uploads=wait) if self._task else False
def get_flush_period(self) -> Optional[float]:
    """
    Get the Logger flush period.

    :return: The logger flush period in seconds, or None when no flusher is active.
    """
    flusher = self._flusher
    return flusher.period if flusher else None
def set_flush_period(self, period: float) -> None:
    """
    Set the logger flush period (no-op).

    Deprecated - Use ``sdk.development.worker.report_period_sec`` to externally control the flush period.

    :param float period: Ignored; kept only for backward compatibility with older callers.
    """
    # Intentionally a no-op: the flush period is controlled via configuration.
    return None
def set_default_debug_sample_history(self, max_history: int) -> None:
    """
    Set the default maximum debug sample history when reporting media/debug samples.

    Overrides the configuration file defaults. When reporting debug samples with the
    same title/series combination and running iterations, only the last ``max_history``
    samples are stored (older samples are overwritten). The value set here is used by
    `report_image`, `report_media` etc. when they are called without ``max_history``.

    :param max_history: Number of samples (files) to store per unique title/series
        reported with different iteration counters. Guards against unbounded growth
        of server-side storage.
    """
    # Coerce to int so str/float inputs behave consistently downstream.
    self._default_max_sample_history = int(max_history)
def get_default_debug_sample_history(self) -> int:
    """
    Return the default max debug sample history when reporting media/debug samples.

    If a value was not set explicitly (see :meth:`set_default_debug_sample_history`),
    the configuration-file default is returned.

    :return: default number of samples (files) stored per unique title/series reported
        with different iteration counters.
    """
    if self._default_max_sample_history is None:
        # Fall back to the library-wide configured history size.
        # noinspection PyProtectedMember
        return int(UploadEvent._file_history_size)
    return self._default_max_sample_history
def report_image_and_upload(
    self,
    title: str,
    series: str,
    iteration: Optional[int] = None,
    path: Optional[str] = None,
    matrix: Optional[Union[np.ndarray, "Image.Image"]] = None,
    max_image_history: Optional[int] = None,
    delete_after_upload: bool = False,
) -> None:
    """
    Report an image and upload its contents (deprecated alias).

    .. deprecated:: 0.13.0
        Use :meth:`Logger.report_image` instead

    :param title: The title (metric) of the image.
    :param series: The series name (variant) of the reported image.
    :param iteration: The reported iteration / step.
    :param path: A path to an image file.
    :param matrix: Image data (RGB); forwarded as ``image`` to :meth:`Logger.report_image`.
    :param max_image_history: The maximum number of images to store per metric/variant
        combination (negative for unlimited).
    :param delete_after_upload: If True, delete the local copy after a successful upload.
    """
    # Fix: emit a runtime DeprecationWarning, consistent with Logger.report_matrix -
    # previously this alias gave callers no runtime migration signal.
    warnings.warn(
        ":meth:`Logger.report_image_and_upload` is deprecated; use :meth:`Logger.report_image` instead.",
        DeprecationWarning,
    )
    self.report_image(
        title=title,
        series=series,
        iteration=iteration or 0,
        local_path=path,
        image=matrix,
        max_image_history=max_image_history,
        delete_after_upload=delete_after_upload,
    )
def capture_logging(self) -> Any:
    """
    Return context capturing all the logs (via logging) reported under the context.

    While the context is active, the python ``logging`` formatter is patched so
    records are routed through this Logger. If the logger was constructed with
    full logging support already connected, the returned context is a no-op.

    :return: a ContextManager
    """

    class _LoggingContext(object):
        # Thin context manager: patch on enter, unpatch on exit.
        # A None logger means logging is already connected -> both hooks no-op.
        def __init__(self, a_logger: Logger) -> None:
            self.logger = a_logger

        def __enter__(self, *_: Any, **__: Any) -> None:
            if not self.logger:
                return
            StdStreamPatch.patch_logging_formatter(self.logger)

        def __exit__(self, *_: Any, **__: Any) -> None:
            if not self.logger:
                return
            StdStreamPatch.remove_patch_logging_formatter()

    # Do nothing if we already have full logging support
    return _LoggingContext(None if self._connect_logging else self)
@classmethod
def tensorboard_auto_group_scalars(cls, group_scalars: bool = False) -> None:
    """
    Choose how TensorBoard scalars without a title are reported.

    :param group_scalars: Group TensorBoard scalars without a title

        - ``True`` - Scalars without specific titles are grouped together in the "Scalars" plot,
          preserving backward compatibility with ClearML automagical behavior.
        - ``False`` - TensorBoard scalars without titles get a title/series with the same tag. (default)
    """
    cls._tensorboard_logging_auto_group_scalars = group_scalars
@classmethod
def tensorboard_single_series_per_graph(cls, single_series: bool = False) -> None:
    """
    Choose whether TensorBoard scalar series get separate plots (deprecated, now controlled from the UI).

    :param single_series: Group TensorBoard scalar series together

        - ``True`` - Generate a separate plot for each TensorBoard scalar series.
        - ``False`` - Group the TensorBoard scalar series together in the same plot. (default)
    """
    cls._tensorboard_single_series_per_graph = single_series
@classmethod
def matplotlib_force_report_non_interactive(cls, force: bool) -> None:
    """
    Control how matplotlib figures are reported.

    If True, all matplotlib figures are always converted to non-interactive static plots
    (images), appearing under the Plots section. If False (default), matplotlib figures
    are converted into interactive web UI plotly figures; when conversion fails, the
    figure falls back to a non-interactive plot.

    :param force: If True, all matplotlib figures are converted automatically to non-interactive plots.
    """
    # Imported locally to avoid a module-level import cycle with the metrics backend.
    from clearml.backend_interface.metrics import Reporter

    Reporter.matplotlib_force_report_non_interactive(force=force)
@classmethod
def set_reporting_nan_value(cls, value: float, warn_period: int = 1000) -> None:
    """
    Configure how NaN metric values are reported.

    When a NaN value is encountered, it is reported as a floating value (by default 0)
    and the user is warned. This sets both the replacement value and how often the
    warning repeats.

    :param value: The value NaN is converted to
    :param warn_period: Number of times NaN is encountered and converted until the next warning
    """
    MetricsEventAdapter.report_nan_warning_period = warn_period
    MetricsEventAdapter.default_nan_value = value
@classmethod
def set_reporting_inf_value(cls, value: float, warn_period: int = 1000) -> None:
    """
    Configure how inf metric values are reported.

    When an inf value is encountered, it is reported as a floating value (by default 0)
    and the user is warned. This sets both the replacement value and how often the
    warning repeats.

    :param value: The value inf is converted to
    :param warn_period: Number of times inf is encountered and converted until the next warning
    """
    MetricsEventAdapter.report_inf_warning_period = warn_period
    MetricsEventAdapter.default_inf_value = value
@classmethod
def _remove_std_logger(cls) -> bool:
    """Detach the global std-stream patch; return True on success, False on any failure."""
    # noinspection PyBroadException
    try:
        StdStreamPatch.remove_std_logger()
        return True
    except Exception:
        return False
def _parse_level(self, level: Any) -> int:
try:
return int(level)
except (TypeError, ValueError):
self._task.log.log(
level=logging.ERROR,
msg='Logger failed casting log level "%s" to integer' % str(level),
)
return logging.INFO
def _console(
    self,
    msg: str,
    level: int = logging.INFO,
    omit_console: bool = False,
    force_send: bool = False,
    *args: Any,
    **_: Any,
) -> None:
    """
    print text to log (same as print to console, and also prints to console)

    :param msg: text to print to the console (always send to the backend and displayed in console)
    :param level: logging level, default: logging.INFO
    :param omit_console: Omit the console output, and only send the ``msg`` value to the log
    :param force_send: Report with an explicit log level. Only supported if ``omit_console`` is True

        - ``True`` - Omit the console output.
        - ``False`` - Print the console output. (default)
    """
    level = self._parse_level(level)
    # force_send is only honored together with omit_console (see docstring).
    force_send = force_send and omit_console
    # Send to the backend unless console logging is being skipped for a remote
    # main task (and not explicitly forced).
    # noinspection PyProtectedMember
    if not self._skip_console_log() or not self._task._is_remote_main_task() or force_send:
        # check if we have a TaskHandler and that it is valid (shutdown will clear the .task_id)
        if self._task_handler and self._task_handler.task_id:
            # noinspection PyBroadException
            try:
                # Build a synthetic logging record and route it through the
                # task handler that matches our task.
                record = self._task.log.makeRecord(
                    "console",
                    level=level,
                    fn="",
                    lno=0,
                    func="",
                    msg=msg,
                    args=args,
                    exc_info=None,
                )
                # find the task handler that matches our task
                self._task_handler.emit(record)
            except Exception:
                # avoid infinite loop, output directly to stderr
                # noinspection PyBroadException
                try:
                    # make sure we are writing to the original stdout
                    StdStreamPatch.stderr_original_write(
                        'clearml.Logger failed sending log [level {}]: "{}"\n'.format(level, msg)
                    )
                except Exception:
                    pass
        else:
            # No live task handler - report through the task reporter instead.
            # noinspection PyProtectedMember
            self._task._reporter.report_console(message=msg, level=level)
    if not omit_console:
        # if we are here and we grabbed the stdout, we need to print the real thing
        if self._connect_std_streams and not self._skip_console_log():
            # noinspection PyBroadException
            try:
                # make sure we are writing to the original stdout
                StdStreamPatch.stdout_original_write(str(msg) + "\n")
            except Exception:
                pass
        else:
            print(str(msg))
    # if task was not started, we have to start it
    self._start_task_if_needed()
def _report_image_plot_and_upload(
    self,
    title: str,
    series: str,
    iteration: Optional[int] = None,
    path: Optional[str] = None,
    matrix: Optional[np.ndarray] = None,
    max_image_history: Optional[int] = None,
    delete_after_upload: bool = False,
) -> None:
    """
    Report an image, upload its contents, and present in plots section using plotly.

    Image is uploaded to a preconfigured bucket (see :meth:`Logger.set_default_upload_destination`)
    describing the task ID, title, series and iteration.

    :param title: Title (AKA metric)
    :param series: Series (AKA variant)
    :param iteration: Iteration number
    :param path: A path to an image file. Required unless matrix is provided.
    :param matrix: A 3D numpy.ndarray object containing image data (RGB). Required unless filename is provided.
    :param max_image_history: maximum number of image to store per metric/variant combination \
        use negative value for unlimited. default is set in global configuration (default=5)
    :param delete_after_upload: if True, one the file was uploaded the local copy will be deleted
    """
    # if task was not started, we have to start it
    self._start_task_if_needed()
    # Resolve the upload destination; fall back to a local cache folder when
    # no default destination was configured.
    upload_uri = self.get_default_upload_destination()
    if not upload_uri:
        upload_uri = Path(get_cache_dir()) / "debug_images"
        upload_uri.mkdir(parents=True, exist_ok=True)
        # Verify that we can upload to this destination
        upload_uri = str(upload_uri)
        storage = StorageHelper.get(upload_uri)
        upload_uri = storage.verify_upload(folder_uri=upload_uri)
    # noinspection PyProtectedMember
    self._task._reporter.report_image_plot_and_upload(
        title=title,
        series=series,
        path=path,
        matrix=matrix,
        iter=iteration or 0,
        upload_uri=upload_uri,
        max_image_history=max_image_history if max_image_history is not None else self._default_max_sample_history,
        delete_after_upload=delete_after_upload,
    )
def _report_file_and_upload(
    self,
    title: str,
    series: str,
    iteration: Optional[int] = None,
    path: Optional[str] = None,
    max_file_history: Optional[int] = None,
    delete_after_upload: bool = False,
) -> None:
    """
    Upload a file and report it as link in the debug images section.

    File is uploaded to a preconfigured storage (see :meth:`Logger.set_default_upload_destination`)
    describing the task ID, title, series and iteration.

    :param title: Title (AKA metric)
    :param series: Series (AKA variant)
    :param iteration: Iteration number
    :param path: A path to file to be uploaded
    :param max_file_history: maximum number of files to store per metric/variant combination \
        use negative value for unlimited. default is set in global configuration (default=5)
    :param delete_after_upload: if True, one the file was uploaded the local copy will be deleted
    """
    # if task was not started, we have to start it
    self._start_task_if_needed()
    # Resolve the upload destination; fall back to a local cache folder.
    upload_uri = self.get_default_upload_destination()
    if not upload_uri:
        upload_uri = Path(get_cache_dir()) / "debug_images"
        upload_uri.mkdir(parents=True, exist_ok=True)
        # Verify that we can upload to this destination
        upload_uri = str(upload_uri)
        storage = StorageHelper.get(upload_uri)
        upload_uri = storage.verify_upload(folder_uri=upload_uri)
    # Reuses the image-upload pipeline with image=None so only the link is reported.
    # noinspection PyProtectedMember
    self._task._reporter.report_image_and_upload(
        title=title,
        series=series,
        path=path,
        image=None,
        iter=iteration or 0,
        upload_uri=upload_uri,
        max_image_history=max_file_history if max_file_history is not None else self._default_max_sample_history,
        delete_after_upload=delete_after_upload,
    )
def _start_task_if_needed(self) -> None:
# deprecated
pass
def _flush_stdout_handler(self) -> None:
if self._task_handler:
self._task_handler.flush()
def _close_stdout_handler(self, wait: bool = True) -> None:
# detach the sys stdout/stderr
if self._connect_std_streams:
StdStreamPatch.remove_std_logger(self)
if self._task_handler:
t = self._task_handler
self._task_handler = None
t.close(wait)
def _touch_title_series(self, title: str, series: str) -> None:
if title not in self._graph_titles:
self._graph_titles[title] = set()
self._graph_titles[title].add(series)
def _get_used_title_series(self) -> dict:
return self._graph_titles
def _get_tensorboard_series_prefix(self) -> Optional[str]:
"""
:return str: return a string prefix to put in front of every report combing from tensorboard
"""
return self._tensorboard_series_force_prefix
def _set_tensorboard_series_prefix(self, prefix: Optional[str]) -> None:
"""
:param str prefix: Set a string prefix to put in front of every report combing from tensorboard
"""
self._tensorboard_series_force_prefix = str(prefix) if prefix else None
@classmethod
def _get_tensorboard_auto_group_scalars(cls) -> bool:
    """
    Report whether Tensorboard backward-compatible grouping is active.

    :return: True when scalars without a specific title are grouped under the "Scalars"
        graph; False (the default) when untitled scalars get a title/series matching
        their tag.
    """
    return cls._tensorboard_logging_auto_group_scalars
@classmethod
def _get_tensorboard_single_series_per_graph(cls) -> bool:
    """
    Report whether each Tensorboard scalar series gets its own graph.

    :return: True when a separate graph (plot) is generated per scalar series;
        False (the default) when series are grouped according to their title.
    """
    return cls._tensorboard_single_series_per_graph
@classmethod
def _skip_console_log(cls) -> bool:
    """True when running remotely without the simulate-remote debug override, i.e. console log capture should be skipped."""
    if not running_remotely():
        return False
    return not DEBUG_SIMULATE_REMOTE_TASK.get()
| Logger |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/calls.py | {
"start": 7919,
"end": 17830
} | class ____(Generic[T]):
"""
A deferred function call.
"""
future: Future[T]
fn: "_SyncOrAsyncCallable[..., T]"
args: tuple[Any, ...]
kwargs: dict[str, Any]
context: contextvars.Context
timeout: Optional[float]
runner: Optional["Portal"] = None
def __eq__(self, other: object) -> bool:
    """this is to avoid attempts at invalid access of args/kwargs in <3.13 stemming from the
    auto-generated __eq__ method on the dataclass.

    this will no longer be required in 3.13+, see https://github.com/python/cpython/issues/128294
    """
    if self is other:
        return True
    if not isinstance(other, Call):
        return NotImplemented
    try:
        # Attempt to access args/kwargs. If any are missing on self or other,
        # an AttributeError will be raised by the access attempt on one of them.
        # (args/kwargs are deleted after the call executes - see _run_sync/_run_async.)
        s_args, s_kwargs = self.args, self.kwargs
        o_args, o_kwargs = other.args, other.kwargs
    except AttributeError:
        # If args/kwargs are missing on self or other (and self is not other),
        # they are considered not equal. This ensures that a Call with deleted
        # args/kwargs compares as different from one that still has them
        return False

    # If all args/kwargs were accessible on both, proceed with full field comparison.
    # Note: self.future == other.future will use Future's __eq__ (default is identity).
    return (
        (self.future == other.future)
        and (self.fn == other.fn)
        and (s_args == o_args)
        and (s_kwargs == o_kwargs)
        and (self.context == other.context)
        and (self.timeout == other.timeout)
    and (self.runner == other.runner)
    )

# Call objects are explicitly unhashable: a custom __eq__ over mutable state
# cannot provide a consistent hash.
__hash__ = None  # type: ignore
@classmethod
def new(
    cls,
    __fn: _SyncOrAsyncCallable[P, T],
    *args: P.args,
    **kwargs: P.kwargs,
) -> Self:
    """Build a deferred call wrapping ``__fn`` with a fresh future and a snapshot of the current context vars."""
    display_name = getattr(__fn, "__name__", str(__fn))
    return cls(
        future=Future(name=display_name),
        fn=__fn,
        args=args,
        kwargs=kwargs,
        context=contextvars.copy_context(),
        timeout=None,
    )
def set_timeout(self, timeout: Optional[float] = None) -> None:
    """
    Set the timeout for the call.

    The timeout begins when the call starts; it cannot be changed once the call
    is already running or finished.
    """
    future = self.future
    if future.done() or future.running():
        raise RuntimeError("Timeouts cannot be added when the call has started.")
    self.timeout = timeout
def set_runner(self, portal: "Portal") -> None:
    """
    Attach the portal used to run this call; it may only be set once.
    """
    if self.runner is None:
        self.runner = portal
        return
    raise RuntimeError("The portal is already set for this call.")
def run(self) -> Optional[Awaitable[None]]:
    """
    Execute the call and place the result on the future.

    All exceptions during execution of the call are captured.

    Returns an awaitable (a scheduled task) when the call's function produced a
    coroutine and an event loop is running; otherwise the coroutine is driven to
    completion here and None is returned.
    """
    # Do not execute if the future is cancelled
    if not self.future.set_running_or_notify_cancel(self.timeout):
        logger.debug("Skipping execution of cancelled call %r", self)
        return None

    logger.debug(
        "Running call %r in thread %r%s",
        self,
        threading.current_thread().name,
        f" with timeout of {self.timeout}s" if self.timeout is not None else "",
    )

    # Run the synchronous part inside the captured context; a coroutine is
    # returned when the wrapped function is async.
    coro = self.context.run(self._run_sync)

    if coro is not None:
        loop = get_running_loop()
        if loop:
            # If an event loop is available, return a task to be awaited
            # Note we must create a task for context variables to propagate
            logger.debug(
                "Scheduling coroutine for call %r in running loop %r",
                self,
                loop,
            )
            task = self.context.run(loop.create_task, self._run_async(coro))

            # Prevent tasks from being garbage collected before completion
            # See https://docs.python.org/3.10/library/asyncio-task.html#asyncio.create_task
            _ASYNC_TASK_REFS.add(task)
            asyncio.ensure_future(task).add_done_callback(
                lambda _: _ASYNC_TASK_REFS.remove(task)
            )

            return task

        else:
            # Otherwise, execute the function here
            logger.debug("Executing coroutine for call %r in new loop", self)
            return self.context.run(asyncio.run, self._run_async(coro))

    return None
def result(self, timeout: Optional[float] = None) -> "T":
    """
    Wait for the result of the call.

    Not safe for use from asynchronous contexts.
    """
    future = self.future
    return future.result(timeout=timeout)
async def aresult(self):
    """
    Wait for the result of the call.

    For use from asynchronous contexts.
    """
    try:
        return await asyncio.wrap_future(self.future)
    except asyncio.CancelledError as exc:
        # Translate asyncio's cancellation into the library's own CancelledError.
        raise CancelledError() from exc
def cancelled(self) -> bool:
    """Report whether the call's future was cancelled."""
    future = self.future
    return future.cancelled()
def timedout(self) -> bool:
    """Report whether the call timed out."""
    future = self.future
    return future.timedout()
def cancel(self) -> bool:
return self.future.cancel()
    def _run_sync(self) -> Optional[Awaitable[T]]:
        """
        Execute the wrapped callable synchronously within this call's context.

        Resolves the future with the result or the captured exception, unless
        the callable returned an awaitable — in that case the awaitable is
        returned so the caller (`run`) can schedule it asynchronously.
        """
        cancel_scope = None
        try:
            with set_current_call(self):
                with self.future.enforce_sync_deadline() as cancel_scope:
                    try:
                        result = self.fn(*self.args, **self.kwargs)
                    finally:
                        # Forget this call's arguments in order to free up any memory
                        # that may be referenced by them; after a call has happened,
                        # there's no need to keep a reference to them
                        with contextlib.suppress(AttributeError):
                            del self.args, self.kwargs
            # Return the coroutine for async execution
            if inspect.isawaitable(result):
                return result
        except CancelledError:
            # Report cancellation
            # in rare cases, enforce_sync_deadline raises CancelledError
            # prior to yielding
            if cancel_scope is None:
                self.future.cancel()
                return None
            if cancel_scope.timedout():
                # Mark the cause so callers can tell timeout from explicit cancel
                setattr(self.future, "_timed_out", True)
                self.future.cancel()
            elif cancel_scope.cancelled():
                self.future.cancel()
            else:
                # Cancellation did not originate from our scope; propagate it
                raise
        except BaseException as exc:
            logger.debug("Encountered exception in call %r", self, exc_info=True)
            self.future.set_exception(exc)
            # Prevent reference cycle in `exc`
            del self
        else:
            self.future.set_result(result)  # noqa: F821
            logger.debug("Finished call %r", self)  # noqa: F821
    async def _run_async(self, coro: Awaitable[T]) -> None:
        """
        Await the coroutine produced by the call and resolve the future.

        Async counterpart of `_run_sync`: the future receives the awaited
        value, the captured exception, or cancellation.
        """
        cancel_scope = result = None
        try:
            with set_current_call(self):
                with self.future.enforce_async_deadline() as cancel_scope:
                    try:
                        result = await coro
                    finally:
                        # Forget this call's arguments in order to free up any memory
                        # that may be referenced by them; after a call has happened,
                        # there's no need to keep a reference to them
                        with contextlib.suppress(AttributeError):
                            del self.args, self.kwargs
        except CancelledError:
            # Report cancellation
            if TYPE_CHECKING:
                assert cancel_scope is not None
            if cancel_scope.timedout():
                # Mark the cause so callers can tell timeout from explicit cancel
                setattr(self.future, "_timed_out", True)
                self.future.cancel()
            elif cancel_scope.cancelled():
                self.future.cancel()
            else:
                # Cancellation did not originate from our scope; propagate it
                raise
        except BaseException as exc:
            logger.debug("Encountered exception in async call %r", self, exc_info=True)
            self.future.set_exception(exc)
            # Prevent reference cycle in `exc`
            del self
        else:
            # F821 ignored because Ruff gets confused about the del self above.
            self.future.set_result(result)  # noqa: F821
            logger.debug("Finished async call %r", self)  # noqa: F821
def __call__(self) -> Union[T, Awaitable[T]]:
"""
Execute the call and return its result.
All executions during execution of the call are re-raised.
"""
coro = self.run()
# Return an awaitable if in an async context
if coro is not None:
async def run_and_return_result() -> T:
await coro
return self.result()
return run_and_return_result()
else:
return self.result()
def __repr__(self) -> str:
name = getattr(self.fn, "__name__", str(self.fn))
try:
args, kwargs = self.args, self.kwargs
except AttributeError:
call_args = "<dropped>"
else:
call_args = ", ".join(
[repr(arg) for arg in args]
+ [f"{key}={repr(val)}" for key, val in kwargs.items()]
)
# Enforce a maximum length
if len(call_args) > 100:
call_args = call_args[:100] + "..."
return f"{name}({call_args})"
| Call |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 34554,
"end": 36637
} | class ____(Response):
"""
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
:param metrics: Plot events grouped by tasks and iterations
:type metrics: Sequence[PlotsResponseTaskMetrics]
"""
_service = "events"
_action = "plots"
_version = "2.23"
_schema = {
"properties": {
"metrics": {
"description": "Plot events grouped by tasks and iterations",
"items": {"$ref": "#/definitions/plots_response_task_metrics"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, scroll_id: Optional[str] = None, metrics: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(PlotsResponse, self).__init__(**kwargs)
self.scroll_id = scroll_id
self.metrics = metrics
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [PlotsResponseTaskMetrics.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", PlotsResponseTaskMetrics, is_array=True)
self._property_metrics = value
| PlotsResponse |
python | wandb__wandb | wandb/sdk/artifacts/_generated/enums.py | {
"start": 246,
"end": 356
} | class ____(str, Enum):
PENDING = "PENDING"
COMMITTED = "COMMITTED"
DELETED = "DELETED"
| ArtifactState |
python | pandas-dev__pandas | pandas/tests/series/methods/test_count.py | {
"start": 74,
"end": 567
} | class ____:
def test_count(self, datetime_series):
assert datetime_series.count() == len(datetime_series)
datetime_series[::2] = np.nan
assert datetime_series.count() == np.isfinite(datetime_series).sum()
def test_count_categorical(self):
ser = Series(
Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
)
result = ser.count()
assert result == 2
| TestSeriesCount |
python | keras-team__keras | keras/src/layers/pooling/average_pooling2d.py | {
"start": 185,
"end": 4121
} | class ____(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`stride=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
| AveragePooling2D |
python | Netflix__metaflow | metaflow/plugins/cards/exception.py | {
"start": 3699,
"end": 4122
} | class ____(MetaflowException):
headline = (
"`get_cards` function requires a `Task` object or pathspec as an argument"
)
def __init__(self, obj_type):
msg = (
"`get_cards` function requires a `Task` object or pathspec as an argument. `task` argument cannot be of type %s."
% str(obj_type)
)
super().__init__(msg=msg, lineno=None)
| IncorrectArgumentException |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/spanner.py | {
"start": 17653,
"end": 22044
} | class ____(GoogleCloudBaseOperator):
"""
Updates a Cloud Spanner database with the specified DDL statement.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerUpdateDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param ddl_statements: The string list containing DDL to apply to the database.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param operation_id: (Optional) Unique per database operation id that can
be specified to implement idempotency check.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_update_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"ddl_statements",
"gcp_conn_id",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"ddl_statements": "sql"}
# [END gcp_spanner_database_update_template_fields]
operator_extra_links = (SpannerDatabaseLink(),)
def __init__(
self,
*,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str = PROVIDE_PROJECT_ID,
operation_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.operation_id = operation_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
if not self.ddl_statements:
raise AirflowException("The required parameter 'ddl_statements' is empty or None")
def execute(self, context: Context) -> None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
):
raise AirflowException(
f"The Cloud Spanner database '{self.database_id}' in project '{self.project_id}' "
f"and instance '{self.instance_id}' is missing. "
f"Create the database first before you can update it."
)
SpannerDatabaseLink.persist(
context=context,
instance_id=self.instance_id,
database_id=self.database_id,
project_id=self.project_id or hook.project_id,
)
return hook.update_database(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements,
operation_id=self.operation_id,
)
| SpannerUpdateDatabaseInstanceOperator |
python | cython__cython | Cython/TestUtils.py | {
"start": 7089,
"end": 15572
} | class ____(VisitorTransform):
# actually, a TreeVisitor would be enough, but this needs to run
# as part of the compiler pipeline
def __init__(self):
super().__init__()
self._module_pos = None
self._c_patterns = []
self._c_antipatterns = []
def create_c_file_validator(self):
patterns, antipatterns = self._c_patterns, self._c_antipatterns
def fail(pos, pattern, found, file_path):
Errors.error(pos, "Pattern '%s' %s found in %s" %(
pattern,
'was' if found else 'was not',
file_path,
))
def extract_section(file_path, content, start, end):
if start:
split = re.search(start, content)
if split:
content = content[split.end():]
else:
fail(self._module_pos, start, found=False, file_path=file_path)
if end:
split = re.search(end, content)
if split:
content = content[:split.start()]
else:
fail(self._module_pos, end, found=False, file_path=file_path)
return content
def validate_file_content(file_path, content):
for pattern in patterns:
#print("Searching pattern '%s'" % pattern)
start, end, pattern = _parse_pattern(pattern)
section = extract_section(file_path, content, start, end)
if not re.search(pattern, section):
fail(self._module_pos, pattern, found=False, file_path=file_path)
for antipattern in antipatterns:
#print("Searching antipattern '%s'" % antipattern)
start, end, antipattern = _parse_pattern(antipattern)
section = extract_section(file_path, content, start, end)
if re.search(antipattern, section):
fail(self._module_pos, antipattern, found=True, file_path=file_path)
def validate_c_file(result):
c_file = result.c_file
if not (patterns or antipatterns):
#print("No patterns defined for %s" % c_file)
return result
with open(c_file, encoding='utf8') as f:
content = f.read()
content = _strip_c_comments(content)
validate_file_content(c_file, content)
return validate_c_file
def _check_directives(self, node):
directives = node.directives
if 'test_assert_path_exists' in directives:
for path in directives['test_assert_path_exists']:
if TreePath.find_first(node, path) is None:
Errors.error(
node.pos,
"Expected path '%s' not found in result tree" % path)
if 'test_fail_if_path_exists' in directives:
for path in directives['test_fail_if_path_exists']:
first_node = TreePath.find_first(node, path)
if first_node is not None:
Errors.error(
first_node.pos,
"Unexpected path '%s' found in result tree" % path)
if 'test_assert_c_code_has' in directives:
self._c_patterns.extend(directives['test_assert_c_code_has'])
if 'test_fail_if_c_code_has' in directives:
self._c_antipatterns.extend(directives['test_fail_if_c_code_has'])
if 'test_body_needs_exception_handling' in directives:
value = directives['test_body_needs_exception_handling']
if value is not None:
from .Compiler.ParseTreeTransforms import HasNoExceptionHandlingVisitor
visitor = HasNoExceptionHandlingVisitor()
result = not visitor(node.body)
if value != result:
visitor(node.body)
Errors.error(
node.pos,
"Node had unexpected exception handling value"
)
def visit_ModuleNode(self, node):
self._module_pos = node.pos
self._check_directives(node)
self.visitchildren(node)
return node
def visit_CompilerDirectivesNode(self, node):
self._check_directives(node)
self.visitchildren(node)
return node
visit_Node = VisitorTransform.recurse_to_children
def unpack_source_tree(tree_file, workdir, cython_root):
programs = {
'PYTHON': [sys.executable],
'CYTHON': [sys.executable, os.path.join(cython_root, 'cython.py')],
'CYTHONIZE': [sys.executable, os.path.join(cython_root, 'cythonize.py')]
}
if workdir is None:
workdir = tempfile.mkdtemp()
header, cur_file = [], None
with open(tree_file, 'rb') as f:
try:
for line in f:
if line[:5] == b'#####':
filename = line.strip().strip(b'#').strip().decode('utf8').replace('/', os.path.sep)
path = os.path.join(workdir, filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if cur_file is not None:
to_close, cur_file = cur_file, None
to_close.close()
cur_file = open(path, 'wb')
elif cur_file is not None:
cur_file.write(line)
elif line.strip() and not line.lstrip().startswith(b'#'):
if line.strip() not in (b'"""', b"'''"):
command = shlex.split(line.decode('utf8'))
if not command: continue
# In Python 3: prog, *args = command
prog, args = command[0], command[1:]
try:
header.append(programs[prog]+args)
except KeyError:
header.append(command)
finally:
if cur_file is not None:
cur_file.close()
return workdir, header
def write_file(file_path, content, dedent=False, encoding=None):
r"""Write some content (text or bytes) to the file
at `file_path` without translating `'\n'` into `os.linesep`.
The default encoding is `'utf-8'`.
"""
if isinstance(content, bytes):
mode = "wb"
# binary mode doesn't take an encoding and newline arguments
newline = None
default_encoding = None
else:
mode = "w"
# any "\n" characters written are not translated
# to the system default line separator, os.linesep
newline = "\n"
default_encoding = "utf-8"
if encoding is None:
encoding = default_encoding
if dedent:
content = textwrap.dedent(content)
with open(file_path, mode=mode, encoding=encoding, newline=newline) as f:
f.write(content)
def write_newer_file(file_path, newer_than, content, dedent=False, encoding=None):
r"""
Write `content` to the file `file_path` without translating `'\n'`
into `os.linesep` and make sure it is newer than the file `newer_than`.
The default encoding is `'utf-8'` (same as for `write_file`).
"""
write_file(file_path, content, dedent=dedent, encoding=encoding)
try:
other_time = os.path.getmtime(newer_than)
except OSError:
# Support writing a fresh file (which is always newer than a non-existent one)
other_time = None
while other_time is None or other_time >= os.path.getmtime(file_path):
write_file(file_path, content, dedent=dedent, encoding=encoding)
def py_parse_code(code):
"""
Compiles code far enough to get errors from the parser and post-parse stage.
Is useful for checking for syntax errors, however it doesn't generate runable
code.
"""
context = StringParseContext("test")
# all the errors we care about are in the parsing or postparse stage
try:
with Errors.local_errors() as errors:
result = TreeFragment(code, pipeline=[PostParse(context)])
result = result.substitute()
if errors:
raise errors[0] # compile error, which should get caught below
else:
return result
except Errors.CompileError as e:
raise SyntaxError(e.message_only)
| TreeAssertVisitor |
python | google__pytype | pytype/pytd/printer.py | {
"start": 324,
"end": 1339
} | class ____:
"""Imports from the `typing` module."""
def __init__(self):
# Typing members that are imported via `from typing import ...`.
self._members: dict[_AliasType, _NameType] = {}
# The number of times that each typing member is used.
self._counts: dict[_NameType, int] = collections.defaultdict(int)
@property
def members(self):
# Note that when a typing member has multiple aliases, this keeps only one.
return {name: alias for alias, name in self._members.items()}
def add(self, name: str, alias: str):
self._counts[name] += 1
self._members[alias] = name
def decrement_count(self, name: str):
self._counts[name] -= 1
def to_import_statements(self):
targets = []
for alias, name in self._members.items():
if not self._counts[name]:
continue
targets.append(f"{name} as {alias}" if alias != name else name)
if targets:
return ["from typing import " + ", ".join(sorted(targets))]
else:
return []
| _TypingImports |
python | pytorch__pytorch | torch/distributed/nn/functional.py | {
"start": 15357,
"end": 15844
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, op, group, tensor):
ctx.group = group
ctx.op = op
tensor = tensor.clone(memory_format=torch.contiguous_format)
dist.all_reduce(tensor, op=op, group=group)
return tensor
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
return (None, None) + (_AllReduce.apply(ctx.op, ctx.group, grad_output),)
| _AllReduce |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/emr/types.py | {
"start": 194,
"end": 715
} | class ____(PyEnum):
"""Cluster state for EMR."""
Starting = "STARTING"
Bootstrapping = "BOOTSTRAPPING"
Running = "RUNNING"
Waiting = "WAITING"
Terminating = "TERMINATING"
Terminated = "TERMINATED"
TerminatedWithErrors = "TERMINATED_WITH_ERRORS"
EMR_CLUSTER_TERMINATED_STATES = [
EmrClusterState.Terminating,
EmrClusterState.Terminated,
EmrClusterState.TerminatedWithErrors,
]
EMR_CLUSTER_DONE_STATES = EMR_CLUSTER_TERMINATED_STATES + [EmrClusterState.Waiting]
| EmrClusterState |
python | apache__airflow | providers/slack/src/airflow/providers/slack/operators/slack.py | {
"start": 1290,
"end": 3972
} | class ____(BaseOperator):
"""
Base Slack Operator class.
:param slack_conn_id: :ref:`Slack API Connection <howto/connection:slack>`
which its password is Slack API token.
:param method: The Slack API Method to Call (https://api.slack.com/methods).
:param api_params: API Method call parameters (https://api.slack.com/methods). Optional
:param timeout: The maximum number of seconds the client will wait to connect
and receive a response from Slack. Optional
:param base_url: A string representing the Slack API base URL. Optional
:param proxy: Proxy to make the Slack API call. Optional
:param retry_handlers: List of handlers to customize retry logic in ``slack_sdk.WebClient``. Optional
"""
def __init__(
self,
*,
slack_conn_id: str = SlackHook.default_conn_name,
method: str,
api_params: dict | None = None,
base_url: str | None = None,
proxy: str | None = None,
timeout: int | None = None,
retry_handlers: list[RetryHandler] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.slack_conn_id = slack_conn_id
self.method = method
self.api_params = api_params
self.base_url = base_url
self.timeout = timeout
self.proxy = proxy
self.retry_handlers = retry_handlers
@cached_property
def hook(self) -> SlackHook:
"""Slack Hook."""
return SlackHook(
slack_conn_id=self.slack_conn_id,
base_url=self.base_url,
timeout=self.timeout,
proxy=self.proxy,
retry_handlers=self.retry_handlers,
)
def construct_api_call_params(self) -> Any:
"""
Construct API call parameters used by the execute function.
Allow templating on the source fields of the ``api_call_params`` dict
before construction.
Child classes should override this. Each SlackAPIOperator child class is
responsible for having function set ``self.api_call_params`` with a dict
of API call parameters (https://api.slack.com/methods)
"""
raise NotImplementedError(
"SlackAPIOperator should not be used directly. Chose one of the subclasses instead"
)
def execute(self, context: Context):
if not self.method:
msg = f"Expected non empty `method` attribute in {type(self).__name__!r}, but got {self.method!r}"
raise ValueError(msg)
if not self.api_params:
self.construct_api_call_params()
self.hook.call(self.method, json=self.api_params)
| SlackAPIOperator |
python | tox-dev__tox | src/tox/config/loader/convert.py | {
"start": 537,
"end": 6901
} | class ____(ABC, Generic[T]):
"""A class that converts a raw type to a given tox (python) type."""
def to(self, raw: T, of_type: type[V] | UnionType, factory: Factory[V]) -> V: # noqa: PLR0911
"""
Convert given raw type to python type.
:param raw: the raw type
:param of_type: python type
:param factory: factory method to build the object
:return: the converted type
"""
from_module = getattr(of_type, "__module__", None)
if (
from_module in {"typing", "typing_extensions"}
or of_type.__class__ == UnionType
or (hasattr(typing, "GenericAlias") and isinstance(of_type, typing.GenericAlias))
):
return self._to_typing(raw, of_type, factory)
if isclass(of_type):
if issubclass(of_type, Path):
return self.to_path(raw) # type: ignore[return-value]
if issubclass(of_type, bool):
return self.to_bool(raw) # type: ignore[return-value]
if issubclass(of_type, Command):
return self.to_command(raw) # type: ignore[return-value]
if issubclass(of_type, EnvList):
return self.to_env_list(raw) # type: ignore[return-value]
if issubclass(of_type, str):
return self.to_str(raw) # type: ignore[return-value]
if isinstance(raw, cast("type[V]", of_type)): # already target type no need to transform it
# do it this late to allow normalization - e.g. string strip
return raw
if factory:
return factory(raw)
return cast("type[V]", of_type)(raw) # type: ignore[call-arg]
def _to_typing(self, raw: T, of_type: type[V] | UnionType, factory: Factory[V]) -> V: # noqa: C901
origin = getattr(of_type, "__origin__", of_type.__class__)
result: Any = _NO_MAPPING
if origin in {list, list}:
entry_type = cast("type[V]", of_type).__args__[0] # type: ignore[attr-defined]
result = [self.to(i, entry_type, factory) for i in self.to_list(raw, entry_type)]
if isclass(entry_type) and issubclass(entry_type, Command):
result = [i for i in result if i is not None]
elif origin in {set, set}:
entry_type = cast("type[V]", of_type).__args__[0] # type: ignore[attr-defined]
result = {self.to(i, entry_type, factory) for i in self.to_set(raw, entry_type)}
elif origin in {dict, dict}:
key_type, value_type = cast("type[V]", of_type).__args__[0], cast("type[V]", of_type).__args__[1] # type: ignore[attr-defined]
result = OrderedDict(
(self.to(k, key_type, factory), self.to(v, value_type, factory))
for k, v in self.to_dict(raw, (key_type, value_type))
)
elif origin in {Union, UnionType}: # handle Optional values
args: list[type[Any]] = of_type.__args__ # type: ignore[union-attr,assignment]
none = type(None)
if len(args) == 2 and none in args: # noqa: PLR2004
if isinstance(raw, str):
raw = raw.strip() # type: ignore[assignment]
if not raw:
result = None
else:
new_type = next(i for i in args if i != none) # pragma: no cover
result = self.to(raw, new_type, factory)
elif origin in {Literal, type(Literal)}:
choice = cast("type[V]", of_type).__args__ # type: ignore[attr-defined]
if raw not in choice:
msg = f"{raw} must be one of {choice}"
raise ValueError(msg)
result = raw
if result is not _NO_MAPPING:
return cast("V", result)
msg = f"{raw} cannot cast to {of_type!r}"
raise TypeError(msg)
@staticmethod
@abstractmethod
def to_str(value: T) -> str:
"""
Convert to string.
:param value: the value to convert
:returns: a string representation of the value
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_bool(value: T) -> bool:
"""
Convert to boolean.
:param value: the value to convert
:returns: a boolean representation of the value
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_list(value: T, of_type: type[Any]) -> Iterator[T]:
"""
Convert to list.
:param value: the value to convert
:param of_type: the type of elements in the list
:returns: a list representation of the value
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_set(value: T, of_type: type[Any]) -> Iterator[T]:
"""
Convert to set.
:param value: the value to convert
:param of_type: the type of elements in the set
:returns: a set representation of the value
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_dict(value: T, of_type: tuple[type[Any], type[Any]]) -> Iterator[tuple[T, T]]:
"""
Convert to dictionary.
:param value: the value to convert
:param of_type: a tuple indicating the type of the key and the value
:returns: a iteration of key-value pairs that gets populated into a dict
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_path(value: T) -> Path:
"""
Convert to path.
:param value: the value to convert
:returns: path representation of the value
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_command(value: T) -> Command | None:
"""
Convert to a command to execute.
:param value: the value to convert
:returns: command representation of the value
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_env_list(value: T) -> EnvList:
"""
Convert to a tox EnvList.
:param value: the value to convert
:returns: a list of tox environments from the value
"""
raise NotImplementedError
__all__ = [
"Convert",
"Factory",
]
| Convert |
python | django__django | tests/logging_tests/tests.py | {
"start": 7738,
"end": 8291
} | class ____(SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase):
def test_i18n_page_found_no_warning(self):
self.client.get("/exists/")
self.client.get("/en/exists/")
self.assertEqual(self.logger_output.getvalue(), "")
def test_i18n_page_not_found_warning(self):
self.client.get("/this_does_not/")
self.client.get("/en/nor_this/")
self.assertEqual(
self.logger_output.getvalue(),
"Not Found: /this_does_not/\nNot Found: /en/nor_this/\n",
)
| I18nLoggingTests |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 3342,
"end": 3394
} | class ____:
def f(self):
print("D")
| ParentD |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/ccroot.py | {
"start": 16598,
"end": 16788
} | class ____(link_task):
def runnable_status(self):
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
return Task.SKIP_ME
| fake_shlib |
python | pydata__xarray | xarray/computation/arithmetic.py | {
"start": 3630,
"end": 3781
} | class ____(
ImplementsDatasetReduce,
SupportsArithmetic,
DatasetOpsMixin,
):
__slots__ = ()
__array_priority__ = 50
| DatasetArithmetic |
python | ray-project__ray | python/ray/serve/tests/unit/test_metrics_utils.py | {
"start": 9884,
"end": 30163
} | class ____:
"""Test the new instantaneous merge functionality."""
def test_merge_instantaneous_total_empty(self):
"""Test merge_instantaneous_total with empty input."""
result = merge_instantaneous_total([])
assert result == []
result = merge_instantaneous_total([[], []])
assert result == []
def test_merge_instantaneous_total_single_replica(self):
"""Test merge_instantaneous_total with single replica."""
series = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 7.0),
TimeStampedValue(3.0, 3.0),
]
result = merge_instantaneous_total([series])
expected = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 7.0),
TimeStampedValue(3.0, 3.0),
]
assert_timeseries_equal(result, expected)
def test_merge_instantaneous_total_two_replicas(self):
"""Test merge_instantaneous_total with two replicas."""
series1 = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(3.0, 7.0),
]
series2 = [
TimeStampedValue(2.0, 3.0),
TimeStampedValue(4.0, 1.0),
]
result = merge_instantaneous_total([series1, series2])
# Expected: t=1.0: +5 (total=5), t=2.0: +3 (total=8), t=3.0: +2 (total=10), t=4.0: -2 (total=8)
expected = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 8.0),
TimeStampedValue(3.0, 10.0),
TimeStampedValue(4.0, 8.0),
]
assert_timeseries_equal(result, expected)
def test_merge_instantaneous_total_complex_scenario(self):
"""Test complex scenario matching the autoscaling example."""
# r1: starts at 5 (t=0.2), changes to 7 (t=0.8), then 6 (t=1.5)
series1 = [
TimeStampedValue(0.2, 5.0),
TimeStampedValue(0.8, 7.0),
TimeStampedValue(1.5, 6.0),
]
# r2: starts at 3 (t=0.1), changes to 4 (t=0.9), then 8 (t=1.2)
series2 = [
TimeStampedValue(0.1, 3.0),
TimeStampedValue(0.9, 4.0),
TimeStampedValue(1.2, 8.0),
]
result = merge_instantaneous_total([series1, series2])
expected = [
TimeStampedValue(0.1, 3.0), # r2 starts
TimeStampedValue(0.2, 8.0), # r1 starts: 3+5=8
TimeStampedValue(0.8, 10.0), # r1 changes: 8+(7-5)=10
TimeStampedValue(0.9, 11.0), # r2 changes: 10+(4-3)=11
TimeStampedValue(1.2, 15.0), # r2 changes: 11+(8-4)=15
TimeStampedValue(1.5, 14.0), # r1 changes: 15+(6-7)=14
]
assert_timeseries_equal(result, expected)
def test_time_weighted_average_empty(self):
"""Test time_weighted_average with empty series."""
result = time_weighted_average([], 0.0, 1.0)
assert result is None
def test_time_weighted_average_no_overlap(self):
"""Test time_weighted_average with no data overlap."""
series = [TimeStampedValue(2.0, 5.0)]
result = time_weighted_average(series, 0.0, 1.0)
assert result == 0.0 # Default value before first point
def test_time_weighted_average_constant_value(self):
"""Test time_weighted_average with constant value."""
series = [TimeStampedValue(0.5, 10.0)]
result = time_weighted_average(series, 1.0, 2.0)
assert result == 10.0
def test_time_weighted_average_step_function(self):
"""Test time_weighted_average with step function."""
series = [
TimeStampedValue(0.0, 5.0),
TimeStampedValue(1.0, 10.0),
TimeStampedValue(2.0, 15.0),
]
# Average over [0.5, 1.5): 0.5s at value 5, 0.5s at value 10
result = time_weighted_average(series, 0.5, 1.5)
expected = (5.0 * 0.5 + 10.0 * 0.5) / 1.0
assert abs(result - expected) < 1e-10
def test_time_weighted_average_none_window_start(self):
"""Test time_weighted_average with None window_start."""
series = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
]
# Should use full series from start (t=1.0) to window_end (t=2.5)
result = time_weighted_average(series, None, 2.5)
# 1.0s at value 5 (from 1.0 to 2.0), 0.5s at value 10 (from 2.0 to 2.5)
expected = (5.0 * 1.0 + 10.0 * 0.5) / 1.5
assert abs(result - expected) < 1e-10
def test_time_weighted_average_none_window_end(self):
"""Test time_weighted_average with None window_end."""
series = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
]
# Should use from window_start (t=1.5) to end of series (t=3.0+1.0=4.0)
result = time_weighted_average(series, 1.5, None)
# 0.5s at value 5 (from 1.5 to 2.0), 1.0s at value 10 (from 2.0 to 3.0), 1.0s at value 15 (from 3.0 to 4.0)
expected = (5.0 * 0.5 + 10.0 * 1.0 + 15.0 * 1.0) / 2.5
assert abs(result - expected) < 1e-10
def test_time_weighted_average_both_none(self):
"""Test time_weighted_average with both window_start and window_end None."""
series = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
]
# Should use full series from t=1.0 to t=3.0+1.0=4.0
result = time_weighted_average(series, None, None)
# 1.0s at value 5, 1.0s at value 10, 1.0s at value 15
expected = (5.0 * 1.0 + 10.0 * 1.0 + 15.0 * 1.0) / 3.0
assert abs(result - expected) < 1e-10
def test_time_weighted_average_single_point_none_bounds(self):
"""Test time_weighted_average with single point and None bounds."""
series = [TimeStampedValue(2.0, 10.0)]
result = time_weighted_average(series, None, None)
# Single point with 1.0s duration (from 2.0 to 3.0)
assert result == 10.0
def test_time_weighted_average_custom_last_window_s(self):
"""Test time_weighted_average with custom last_window_s parameter."""
series = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
]
# Test with last_window_s=2.0 (double the default)
result_2s = time_weighted_average(series, None, None, last_window_s=2.0)
# Should use from t=1.0 to t=3.0+2.0=5.0
# 1.0s at value 5 (from 1.0 to 2.0), 1.0s at value 10 (from 2.0 to 3.0), 2.0s at value 15 (from 3.0 to 5.0)
expected_2s = (5.0 * 1.0 + 10.0 * 1.0 + 15.0 * 2.0) / 4.0
assert abs(result_2s - expected_2s) < 1e-10
# Test with last_window_s=0.5 (half the default)
result_0_5s = time_weighted_average(series, None, None, last_window_s=0.5)
# Should use from t=1.0 to t=3.0+0.5=3.5
# 1.0s at value 5 (from 1.0 to 2.0), 1.0s at value 10 (from 2.0 to 3.0), 0.5s at value 15 (from 3.0 to 3.5)
expected_0_5s = (5.0 * 1.0 + 10.0 * 1.0 + 15.0 * 0.5) / 2.5
assert abs(result_0_5s - expected_0_5s) < 1e-10
# Test with window_start specified but window_end None - should still use last_window_s
result_with_start = time_weighted_average(series, 1.5, None, last_window_s=3.0)
# Should use from t=1.5 to t=3.0+3.0=6.0
# 0.5s at value 5 (from 1.5 to 2.0), 1.0s at value 10 (from 2.0 to 3.0), 3.0s at value 15 (from 3.0 to 6.0)
expected_with_start = (5.0 * 0.5 + 10.0 * 1.0 + 15.0 * 3.0) / 4.5
assert abs(result_with_start - expected_with_start) < 1e-10
# Test that last_window_s is ignored when window_end is explicitly provided
result_explicit_end = time_weighted_average(
series, None, 4.0, last_window_s=10.0
)
# Should use from t=1.0 to t=4.0 (ignoring last_window_s=10.0)
# 1.0s at value 5 (from 1.0 to 2.0), 1.0s at value 10 (from 2.0 to 3.0), 1.0s at value 15 (from 3.0 to 4.0)
expected_explicit_end = (5.0 * 1.0 + 10.0 * 1.0 + 15.0 * 1.0) / 3.0
assert abs(result_explicit_end - expected_explicit_end) < 1e-10
def test_merge_timeseries_dicts_instantaneous_basic(self):
"""Test merge_timeseries_dicts basic functionality with instantaneous approach."""
s1 = InMemoryMetricsStore()
s2 = InMemoryMetricsStore()
s1.add_metrics_point({"metric1": 5, "metric2": 10}, timestamp=1.0)
s1.add_metrics_point({"metric1": 7}, timestamp=2.0)
s2.add_metrics_point({"metric1": 3, "metric3": 20}, timestamp=1.5)
result = merge_timeseries_dicts(s1.data, s2.data)
# metric1: s1 starts at 5 (t=1.0), s2 starts at 3 (t=1.5), s1 changes to 7 (t=2.0)
expected_metric1 = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(1.5, 8.0), # 5+3=8
TimeStampedValue(2.0, 10.0), # 3+(7-5)=10
]
assert_timeseries_equal(result["metric1"], expected_metric1)
# metric2: only from s1
expected_metric2 = [TimeStampedValue(1.0, 10.0)]
assert_timeseries_equal(result["metric2"], expected_metric2)
# metric3: only from s2
expected_metric3 = [TimeStampedValue(1.5, 20.0)]
assert_timeseries_equal(result["metric3"], expected_metric3)
def test_merge_instantaneous_vs_windowed_comparison(self):
"""Compare instantaneous merge vs windowed approach."""
# Create test data that highlights the difference
s1 = InMemoryMetricsStore()
s2 = InMemoryMetricsStore()
# Replica 1: 10 requests at t=0.1, then 5 at t=0.9
s1.add_metrics_point({"requests": 10}, timestamp=0.1)
s1.add_metrics_point({"requests": 5}, timestamp=0.9)
# Replica 2: 3 requests at t=0.5, then 8 at t=1.1
s2.add_metrics_point({"requests": 3}, timestamp=0.5)
s2.add_metrics_point({"requests": 8}, timestamp=1.1)
# Instantaneous approach
instantaneous = merge_timeseries_dicts(s1.data, s2.data)
# Instantaneous should have: t=0.1: 10, t=0.5: 13, t=0.9: 8, t=1.1: 13
expected_instantaneous = [
TimeStampedValue(0.1, 10.0),
TimeStampedValue(0.5, 13.0), # 10+3=13
TimeStampedValue(0.9, 8.0), # 3+(5-10)=8
TimeStampedValue(1.1, 13.0), # 5+(8-3)=13
]
assert_timeseries_equal(instantaneous["requests"], expected_instantaneous)
def test_instantaneous_merge_handles_zero_deltas(self):
"""Test that zero deltas are properly filtered out."""
series1 = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 5.0), # No change
TimeStampedValue(3.0, 7.0),
]
series2 = [
TimeStampedValue(1.5, 3.0),
TimeStampedValue(2.5, 3.0), # No change
]
result = merge_instantaneous_total([series1, series2])
# Should skip zero deltas
expected = [
TimeStampedValue(1.0, 5.0),
TimeStampedValue(1.5, 8.0), # 5+3=8
TimeStampedValue(3.0, 10.0), # 8+(7-5)=10
]
assert_timeseries_equal(result, expected)
def test_instantaneous_merge_with_epoch_times(self):
"""Test instantaneous merge with realistic epoch timestamps."""
# Use realistic epoch times (around current time)
base_time = 1703980800.0 # December 30, 2023 16:00:00 UTC
# Simulate 3 replicas reporting metrics over a 30-second period
replica1_series = [
TimeStampedValue(base_time + 0.0, 12.0), # t=0s: 12 running requests
TimeStampedValue(base_time + 5.2, 15.0), # t=5.2s: increased to 15
TimeStampedValue(base_time + 18.7, 8.0), # t=18.7s: dropped to 8
TimeStampedValue(base_time + 25.1, 11.0), # t=25.1s: back up to 11
]
replica2_series = [
TimeStampedValue(base_time + 1.3, 7.0), # t=1.3s: 7 running requests
TimeStampedValue(base_time + 8.9, 9.0), # t=8.9s: increased to 9
TimeStampedValue(base_time + 22.4, 4.0), # t=22.4s: dropped to 4
]
replica3_series = [
TimeStampedValue(base_time + 3.1, 5.0), # t=3.1s: 5 running requests
TimeStampedValue(base_time + 12.6, 8.0), # t=12.6s: increased to 8
TimeStampedValue(base_time + 20.8, 6.0), # t=20.8s: dropped to 6
TimeStampedValue(base_time + 28.3, 9.0), # t=28.3s: increased to 9
]
# Merge all replicas
result = merge_instantaneous_total(
[replica1_series, replica2_series, replica3_series]
)
# Expected timeline of instantaneous totals:
expected = [
TimeStampedValue(base_time + 0.0, 12.0), # r1 starts: 12
TimeStampedValue(base_time + 1.3, 19.0), # r2 starts: 12+7=19
TimeStampedValue(base_time + 3.1, 24.0), # r3 starts: 19+5=24
TimeStampedValue(base_time + 5.2, 27.0), # r1 changes: 24+(15-12)=27
TimeStampedValue(base_time + 8.9, 29.0), # r2 changes: 27+(9-7)=29
TimeStampedValue(base_time + 12.6, 32.0), # r3 changes: 29+(8-5)=32
TimeStampedValue(base_time + 18.7, 25.0), # r1 changes: 32+(8-15)=25
TimeStampedValue(base_time + 20.8, 23.0), # r3 changes: 25+(6-8)=23
TimeStampedValue(base_time + 22.4, 18.0), # r2 changes: 23+(4-9)=18
TimeStampedValue(base_time + 25.1, 21.0), # r1 changes: 18+(11-8)=21
TimeStampedValue(base_time + 28.3, 24.0), # r3 changes: 21+(9-6)=24
]
assert_timeseries_equal(result, expected)
# Test time-weighted average over different intervals
# Full series average
full_avg = time_weighted_average(result, None, None)
assert full_avg is not None
assert full_avg > 0
# Average over first 10 seconds
early_avg = time_weighted_average(result, base_time, base_time + 10.0)
assert early_avg is not None
# Average over last 10 seconds
late_avg = time_weighted_average(result, base_time + 20.0, base_time + 30.0)
assert late_avg is not None
# Verify the averages make sense relative to each other
# (early period has higher values, so early_avg should be > late_avg)
assert early_avg > late_avg
print(f"Full series average: {full_avg:.2f}")
print(f"Early period average (0-10s): {early_avg:.2f}")
print(f"Late period average (20-30s): {late_avg:.2f}")
def test_merge_instantaneous_total_timestamp_rounding(self):
"""Test that timestamps are rounded to 10ms precision."""
series1 = [
TimeStampedValue(1.001234, 5.0), # Should round to 1.00
TimeStampedValue(2.005678, 7.0), # Should round to 2.01
TimeStampedValue(3.009999, 3.0), # Should round to 3.01
]
series2 = [
TimeStampedValue(1.504321, 2.0), # Should round to 1.50
TimeStampedValue(2.008765, 4.0), # Should round to 2.01
]
result = merge_instantaneous_total([series1, series2])
# Verify timestamps are rounded to 2 decimal places (10ms precision)
expected_timestamps = [1.00, 1.50, 2.01, 3.01]
actual_timestamps = [point.timestamp for point in result]
assert len(actual_timestamps) == len(expected_timestamps)
for actual, expected in zip(actual_timestamps, expected_timestamps):
assert actual == expected, f"Expected {expected}, got {actual}"
# Verify values are correct with rounded timestamps
expected = [
TimeStampedValue(1.00, 5.0), # series1 starts
TimeStampedValue(1.50, 7.0), # series2 starts: 5+2=7
TimeStampedValue(
2.01, 11.0
), # s1 becomes 7, s2 becomes 4. Total: 7 + 4 = 11.0
TimeStampedValue(3.01, 7.0), # series1 changes: 11+(3-7)=7
]
assert_timeseries_equal(result, expected)
def test_merge_instantaneous_total_combine_same_timestamp(self):
"""Test that datapoints with same rounded timestamp are combined."""
# Create series where multiple events round to the same timestamp
series1 = [
TimeStampedValue(1.001, 5.0), # Rounds to 1.00
TimeStampedValue(1.004, 7.0), # Also rounds to 1.00
TimeStampedValue(2.000, 10.0), # Rounds to 2.00
]
series2 = [
TimeStampedValue(1.002, 3.0), # Rounds to 1.00
TimeStampedValue(1.005, 4.0), # Also rounds to 1.00
]
result = merge_instantaneous_total([series1, series2])
# Should only have unique rounded timestamps
timestamps = [point.timestamp for point in result]
assert timestamps == [
1.00,
2.00,
], f"Expected [1.00, 2.00], got {timestamps}"
# The value at 1.00 should be the final state after all changes at that rounded time
# Order of events at rounded timestamp 1.00:
# - series1: 0->5 (t=1.001)
# - series2: 0->3 (t=1.002)
# - series1: 5->7 (t=1.004)
# - series2: 3->4 (t=1.005)
# Final state: series1=7, series2=4, total=11
expected = [
TimeStampedValue(1.00, 11.0), # Final combined state at rounded timestamp
TimeStampedValue(2.00, 14.0), # series1 changes: 11+(10-7)=14
]
assert_timeseries_equal(result, expected)
def test_merge_instantaneous_total_edge_cases_rounding(self):
"""Test edge cases for timestamp rounding and combination."""
# Test rounding edge cases with multiple replicas (rounding only happens with 2+ replicas)
series1 = [
TimeStampedValue(1.004999, 5.0), # Should round to 1.0
TimeStampedValue(1.005000, 7.0), # Should round to 1.0 (round half to even)
TimeStampedValue(1.005001, 9.0), # Should round to 1.01
]
series2 = [
TimeStampedValue(1.003, 2.0), # Should round to 1.0
]
result = merge_instantaneous_total([series1, series2])
# Should have two distinct rounded timestamps
expected_timestamps = [1.0, 1.01]
actual_timestamps = [point.timestamp for point in result]
assert actual_timestamps == expected_timestamps
# Values should reflect the changes
# At rounded timestamp 1.0:
# - series2 starts at 2 (t=1.003)
# - series1 starts at 5 (t=1.004999), total: 2+5=7
# - series1 changes to 7 (t=1.005000), total: 2+7=9
# At rounded timestamp 1.01:
# - series1 changes to 9 (t=1.005001), total: 2+9=11
expected = [
TimeStampedValue(1.0, 9.0), # Final state at rounded timestamp 1.0
TimeStampedValue(1.01, 11.0), # State after change at 1.005001
]
assert_timeseries_equal(result, expected)
def test_merge_instantaneous_total_no_changes_filtered(self):
"""Test that zero-change events are filtered even with rounding."""
# Use multiple replicas to trigger the merge logic (single replica returns as-is)
series1 = [
TimeStampedValue(1.001, 5.0), # Rounds to 1.00
TimeStampedValue(1.004, 5.0), # Also rounds to 1.00, no change
TimeStampedValue(2.000, 7.0), # Rounds to 2.00, change
]
series2 = [
TimeStampedValue(1.002, 3.0), # Rounds to 1.00
TimeStampedValue(2.001, 3.0), # Rounds to 2.00, no change
]
result = merge_instantaneous_total([series1, series2])
# Should only include points where total value actually changed
# At 1.00: series2 starts at 3, then series1 starts at 5, total = 8
# At 2.00: series1 changes to 7, total = 3+7 = 10
expected = [
TimeStampedValue(1.00, 8.0), # Initial combined value
TimeStampedValue(2.00, 10.0), # Value change
]
assert_timeseries_equal(result, expected)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestInstantaneousMerge |
python | tiangolo__fastapi | tests/test_generic_parameterless_depends.py | {
"start": 235,
"end": 1875
} | class ____:
pass
@app.get("/a")
async def a(dep: Dep[A]):
return {"cls": dep.__class__.__name__}
@app.get("/b")
async def b(dep: Dep[B]):
return {"cls": dep.__class__.__name__}
client = TestClient(app)
def test_generic_parameterless_depends():
response = client.get("/a")
assert response.status_code == 200, response.text
assert response.json() == {"cls": "A"}
response = client.get("/b")
assert response.status_code == 200, response.text
assert response.json() == {"cls": "B"}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"info": {"title": "FastAPI", "version": "0.1.0"},
"openapi": "3.1.0",
"paths": {
"/a": {
"get": {
"operationId": "a_a_get",
"responses": {
"200": {
"content": {"application/json": {"schema": {}}},
"description": "Successful Response",
}
},
"summary": "A",
}
},
"/b": {
"get": {
"operationId": "b_b_get",
"responses": {
"200": {
"content": {"application/json": {"schema": {}}},
"description": "Successful Response",
}
},
"summary": "B",
}
},
},
}
| B |
python | sympy__sympy | sympy/physics/quantum/tests/test_state.py | {
"start": 872,
"end": 966
} | class ____(Ket):
@classmethod
def default_args(self):
return ("test",)
| CustomKet |
python | plotly__plotly.py | plotly/basedatatypes.py | {
"start": 216282,
"end": 228101
} | class ____(BaseTraceHierarchyType):
"""
Base class for the all trace types.
Specific trace type classes (Scatter, Bar, etc.) are code generated as
subclasses of this class.
"""
def __init__(self, plotly_name, **kwargs):
super(BaseTraceHierarchyType, self).__init__(plotly_name, **kwargs)
# Initialize callback function lists
# ----------------------------------
# ### Callbacks to be called on hover ###
self._hover_callbacks = []
# ### Callbacks to be called on unhover ###
self._unhover_callbacks = []
# ### Callbacks to be called on click ###
self._click_callbacks = []
# ### Callbacks to be called on selection ###
self._select_callbacks = []
# ### Callbacks to be called on deselect ###
self._deselect_callbacks = []
# ### Trace index in figure ###
self._trace_ind = None
# uid
# ---
# All trace types must have a top-level UID
@property
def uid(self):
raise NotImplementedError
@uid.setter
def uid(self, val):
raise NotImplementedError
# Hover
# -----
def on_hover(self, callback, append=False):
"""
Register function to be called when the user hovers over one or more
points in this trace
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.InputDeviceState object
append : bool
If False (the default), this callback replaces any previously
defined on_hover callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points, InputDeviceState
>>> points, state = Points(), InputDeviceState()
>>> def hover_fn(trace, points, state):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_hover(hover_fn)
Note: The creation of the `points` and `state` objects is optional,
it's simply a convenience to help the text editor perform completion
on the arguments inside `hover_fn`
"""
if not append:
del self._hover_callbacks[:]
if callback:
self._hover_callbacks.append(callback)
def _dispatch_on_hover(self, points, state):
"""
Dispatch points and device state all all hover callbacks
"""
for callback in self._hover_callbacks:
callback(self, points, state)
# Unhover
# -------
def on_unhover(self, callback, append=False):
"""
Register function to be called when the user unhovers away from one
or more points in this trace.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.InputDeviceState object
append : bool
If False (the default), this callback replaces any previously
defined on_unhover callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points, InputDeviceState
>>> points, state = Points(), InputDeviceState()
>>> def unhover_fn(trace, points, state):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_unhover(unhover_fn)
Note: The creation of the `points` and `state` objects is optional,
it's simply a convenience to help the text editor perform completion
on the arguments inside `unhover_fn`
"""
if not append:
del self._unhover_callbacks[:]
if callback:
self._unhover_callbacks.append(callback)
def _dispatch_on_unhover(self, points, state):
"""
Dispatch points and device state all all hover callbacks
"""
for callback in self._unhover_callbacks:
callback(self, points, state)
# Click
# -----
def on_click(self, callback, append=False):
"""
Register function to be called when the user clicks on one or more
points in this trace.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.InputDeviceState object
append : bool
If False (the default), this callback replaces any previously
defined on_click callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points, InputDeviceState
>>> points, state = Points(), InputDeviceState()
>>> def click_fn(trace, points, state):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_click(click_fn)
Note: The creation of the `points` and `state` objects is optional,
it's simply a convenience to help the text editor perform completion
on the arguments inside `click_fn`
"""
if not append:
del self._click_callbacks[:]
if callback:
self._click_callbacks.append(callback)
def _dispatch_on_click(self, points, state):
"""
Dispatch points and device state all all hover callbacks
"""
for callback in self._click_callbacks:
callback(self, points, state)
# Select
# ------
def on_selection(self, callback, append=False):
"""
Register function to be called when the user selects one or more
points in this trace.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 4 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.BoxSelector or plotly.callbacks.LassoSelector
append : bool
If False (the default), this callback replaces any previously
defined on_selection callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points
>>> points = Points()
>>> def selection_fn(trace, points, selector):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_selection(selection_fn)
Note: The creation of the `points` object is optional,
it's simply a convenience to help the text editor perform completion
on the `points` arguments inside `selection_fn`
"""
if not append:
del self._select_callbacks[:]
if callback:
self._select_callbacks.append(callback)
def _dispatch_on_selection(self, points, selector):
"""
Dispatch points and selector info to selection callbacks
"""
if "selectedpoints" in self:
# Update the selectedpoints property, which will notify all views
# of the selection change. This is a special case because no
# restyle event is emitted by plotly.js on selection events
# even though these events update the selectedpoints property.
self.selectedpoints = points.point_inds
for callback in self._select_callbacks:
callback(self, points, selector)
# deselect
# --------
def on_deselect(self, callback, append=False):
"""
Register function to be called when the user deselects points
in this trace using doubleclick.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
append : bool
If False (the default), this callback replaces any previously
defined on_deselect callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points
>>> points = Points()
>>> def deselect_fn(trace, points):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_deselect(deselect_fn)
Note: The creation of the `points` object is optional,
it's simply a convenience to help the text editor perform completion
on the `points` arguments inside `selection_fn`
"""
if not append:
del self._deselect_callbacks[:]
if callback:
self._deselect_callbacks.append(callback)
def _dispatch_on_deselect(self, points):
"""
Dispatch points info to deselection callbacks
"""
if "selectedpoints" in self:
# Update the selectedpoints property, which will notify all views
# of the selection change. This is a special case because no
# restyle event is emitted by plotly.js on selection events
# even though these events update the selectedpoints property.
self.selectedpoints = None
for callback in self._deselect_callbacks:
callback(self, points)
| BaseTraceType |
python | modin-project__modin | modin/core/execution/ray/common/engine_wrapper.py | {
"start": 7600,
"end": 8800
} | class ____: # pragma: no cover
"""
Help synchronize across tasks and actors on cluster.
For details see: https://docs.ray.io/en/latest/advanced.html?highlight=signalactor#multi-node-synchronization-using-an-actor
Parameters
----------
event_count : int
Number of events required for synchronization.
"""
def __init__(self, event_count: int):
self.events = [asyncio.Event() for _ in range(event_count)]
def send(self, event_idx: int):
"""
Indicate that event with `event_idx` has occured.
Parameters
----------
event_idx : int
"""
self.events[event_idx].set()
async def wait(self, event_idx: int):
"""
Wait until event with `event_idx` has occured.
Parameters
----------
event_idx : int
"""
await self.events[event_idx].wait()
def is_set(self, event_idx: int) -> bool:
"""
Check that event with `event_idx` had occured or not.
Parameters
----------
event_idx : int
Returns
-------
bool
"""
return self.events[event_idx].is_set()
| SignalActor |
python | ray-project__ray | rllib/examples/rl_modules/classes/rock_paper_scissors_heuristic_rlm.py | {
"start": 207,
"end": 1499
} | class ____(RLModule):
"""In rock-paper-scissors, always chooses the same action within an episode.
The first move is random, all the following moves are the same as the first one.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._actions_per_vector_idx = defaultdict(int)
@override(RLModule)
def _forward_inference(self, batch, **kwargs):
ret = []
# Note that the obs is the previous move of the opponens (0-2). If it's 3, it
# means that there was no previous move and thus, the episode just started.
for i, obs in enumerate(batch[Columns.OBS]):
if obs == 3:
self._actions_per_vector_idx[i] = np.random.choice([0, 1, 2])
ret.append(self._actions_per_vector_idx[i])
return {Columns.ACTIONS: np.array(ret)}
@override(RLModule)
def _forward_exploration(self, batch, **kwargs):
return self._forward_inference(batch, **kwargs)
@override(RLModule)
def _forward_train(self, batch, **kwargs):
raise NotImplementedError(
"AlwaysSameHeuristicRLM is not trainable! Make sure you do NOT include it "
"in your `config.multi_agent(policies_to_train={...})` set."
)
| AlwaysSameHeuristicRLM |
python | getsentry__sentry-python | tests/integrations/grpc/test_grpc.py | {
"start": 10919,
"end": 11675
} | class ____(gRPCTestServiceServicer):
events = []
@staticmethod
def TestServe(request, context): # noqa: N802
with start_span(
op="test",
name="test",
origin="auto.grpc.grpc.TestService",
):
pass
return gRPCTestMessage(text=request.text)
@staticmethod
def TestUnaryStream(request, context): # noqa: N802
for _ in range(3):
yield gRPCTestMessage(text=request.text)
@staticmethod
def TestStreamStream(request, context): # noqa: N802
for r in request:
yield r
@staticmethod
def TestStreamUnary(request, context): # noqa: N802
requests = [r for r in request]
return requests.pop()
| TestService |
python | tensorflow__tensorflow | tensorflow/python/ops/distributions/gamma.py | {
"start": 1620,
"end": 10078
} | class ____(distribution.Distribution):
"""Gamma distribution.
The Gamma distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
Z = Gamma(alpha) beta**(-alpha)
```
where:
* `concentration = alpha`, `alpha > 0`,
* `rate = beta`, `beta > 0`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The cumulative density function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
```
where `GammaInc` is the [lower incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and stddev,
```none
concentration = alpha = (mean / stddev)**2
rate = beta = mean / stddev**2 = concentration / mean
```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples of this distribution are always non-negative. However,
the samples that are smaller than `np.finfo(dtype).tiny` are rounded
to this value, so it appears more often than it should.
This should only be noticeable when the `concentration` is very small, or the
`rate` is very large. See note in `tf.random.gamma` docstring.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in
(Figurnov et al., 2018).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dist = tfd.Gamma(concentration=3.0, rate=2.0)
dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
Compute the gradients of samples w.r.t. the parameters:
```python
concentration = tf.constant(3.0)
rate = tf.constant(2.0)
dist = tfd.Gamma(concentration, rate)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [concentration, rate])
```
References:
Implicit Reparameterization Gradients:
[Figurnov et al., 2018]
(http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="Gamma"):
"""Construct Gamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(Gamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
@distribution_util.AppendDocstring(
"""Note: See `tf.random.gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
return random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igamma returns the regularized incomplete gamma function,
# which is what we want for the CDF.
return math_ops.igamma(self.concentration, self.rate * x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.xlogy(self.concentration - 1., x) - self.rate * x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
- math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
+ ((1. - self.concentration) *
math_ops.digamma(self.concentration)))
def _mean(self):
return self.concentration / self.rate
def _variance(self):
return self.concentration / math_ops.square(self.rate)
def _stddev(self):
return math_ops.sqrt(self.concentration) / self.rate
@distribution_util.AppendDocstring(
"""The mode of a gamma distribution is `(shape - 1) / rate` when
`shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
an exception will be raised rather than returning `NaN`.""")
def _mode(self):
mode = (self.concentration - 1.) / self.rate
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where_v2(self.concentration > 1., mode, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="mode not defined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
| Gamma |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/__init__.py | {
"start": 892,
"end": 3253
} | class ____(pydantic.BaseModel):
"""A serializable object that may be part of an RPC schema."""
class Config:
orm_mode = True
use_enum_values = True
@classmethod
def get_field_names(cls) -> Iterable[str]:
return iter(cls.__fields__.keys())
@classmethod
def serialize_by_field_name(
cls,
obj: Any,
name_transform: Callable[[str], str] | None = None,
value_transform: Callable[[Any], Any] | None = None,
) -> Self:
"""Serialize an object with field names matching this model class.
This class method may be called only on an instantiable subclass. The
returned value is an instance of that subclass. The optional "transform"
arguments, if present, modify each field name or attribute value before it is
passed through to the serialized object. Raises AttributeError if the
argument does not have an attribute matching each field name (after
transformation, if any) of this RpcModel class.
This method should not necessarily be used for every serialization operation.
It is useful for model types, such as "flags" objects, where new fields may
be added in the future and we'd like them to be serialized automatically. For
more stable or more complex models, it is more suitable to list the fields
out explicitly in a constructor call.
"""
fields = {}
for rpc_field_name in cls.get_field_names():
if name_transform is not None:
obj_field_name = name_transform(rpc_field_name)
else:
obj_field_name = rpc_field_name
try:
value = getattr(obj, obj_field_name)
except AttributeError as e:
msg = (
f"While serializing to {cls.__name__}, could not extract "
f"{obj_field_name!r} from {type(obj).__name__}"
)
if name_transform is not None:
msg += f" (transformed from {rpc_field_name!r})"
raise AttributeError(msg) from e
if value_transform is not None:
value = value_transform(value)
fields[rpc_field_name] = value
return cls(**fields)
ServiceInterface = TypeVar("ServiceInterface")
| RpcModel |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_issue_category_handler.py | {
"start": 462,
"end": 3996
} | class ____(ConditionTestCase):
condition = Condition.ISSUE_CATEGORY
payload = {
"id": IssueCategoryFilter.id,
"value": "1",
}
def setUp(self) -> None:
super().setUp()
self.event_data = WorkflowEventData(event=self.group_event, group=self.group_event.group)
self.dc = self.create_data_condition(
type=self.condition,
comparison={
"value": 1,
},
condition_result=True,
)
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison == {
"value": 1,
}
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
self.dc.comparison.update({"value": 2})
self.dc.save()
self.dc.comparison.update({"value": "asdf"})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison = {}
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"hello": "there"})
with pytest.raises(ValidationError):
self.dc.save()
def test_valid_input_values(self) -> None:
self.dc.update(comparison={"value": 1})
self.assert_passes(self.dc, self.event_data)
self.dc.update(comparison={"value": str(GroupCategory.ERROR.value)})
self.assert_passes(self.dc, self.event_data)
self.dc.update(comparison={"value": GroupCategory.ERROR.value})
self.assert_passes(self.dc, self.event_data)
def test_fail_on_invalid_data(self) -> None:
data_cases = [
{"value": None},
{},
{"value": GroupCategory.ERROR.name},
{"value": "ERROR"},
{"value": "error"},
]
for data_case in data_cases:
self.dc.update(comparison=data_case)
self.assert_does_not_pass(self.dc, self.event_data)
def test_group_event(self) -> None:
assert self.event.group is not None
group_event = self.event.for_group(self.group)
self.dc.update(comparison={"value": GroupCategory.ERROR.value})
self.assert_passes(self.dc, WorkflowEventData(event=self.event, group=self.group))
self.assert_passes(self.dc, WorkflowEventData(event=group_event, group=self.group))
@patch("sentry.issues.grouptype.GroupTypeRegistry.get_by_type_id")
def test_invalid_issue_category(self, mock_get_by_type_id: MagicMock) -> None:
mock_get_by_type_id.side_effect = ValueError("Invalid group type")
self.assert_does_not_pass(
self.dc, WorkflowEventData(event=self.event, group=self.event.group)
)
def test_category_v2(self) -> None:
perf_group, perf_event, perf_group_event = self.create_group_event(
group_type_id=PerformanceNPlusOneGroupType.type_id
)
# N+1 DB query issue should pass for 'PERFORMANCE' (deprecated) as well as 'DB_QUERY' (category_v2)
self.dc.update(comparison={"value": GroupCategory.PERFORMANCE.value})
self.assert_passes(self.dc, WorkflowEventData(event=perf_group_event, group=perf_group))
self.dc.update(comparison={"value": GroupCategory.DB_QUERY.value})
self.assert_passes(self.dc, WorkflowEventData(event=perf_group_event, group=perf_group))
| TestIssueCategoryCondition |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 80227,
"end": 80665
} | class ____(sgqlc.types.Enum):
"""The privacy of a repository
Enumeration Choices:
* `INTERNAL`: The repository is visible only to users in the same
business.
* `PRIVATE`: The repository is visible only to those with explicit
access.
* `PUBLIC`: The repository is visible to everyone.
"""
__schema__ = github_schema
__choices__ = ("INTERNAL", "PRIVATE", "PUBLIC")
| RepoAccessAuditEntryVisibility |
python | ray-project__ray | python/ray/experimental/channel/shared_memory_channel.py | {
"start": 20624,
"end": 25385
} | class ____(ChannelInterface):
"""A channel that can be read and written by Ray processes.
It creates `num_shm_buffers` number of buffers and allows buffered read and
write APIs. I.e., read and write APIs are non-blocking as long as it can write to
next buffer or read from a next buffer. See `read` and `write` APIs for
more details.
Args:
writer: The actor that may write to the channel. None signifies the driver.
reader_and_node_list: A list of tuples, where each tuple contains a reader
actor handle and the node ID where the actor is located. Note that currently
we only support this for readers on the same node as the writer.
num_shm_buffers: Number of shared memory buffers to read/write.
typ: Type information about the values passed through the channel.
Either an integer representing the max buffer size in bytes
allowed, or a SharedMemoryType.
"""
def __init__(
self,
writer: Optional[ray.actor.ActorHandle],
reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]],
num_shm_buffers: int,
typ: Optional[Union[int, SharedMemoryType]] = None,
):
self._num_shm_buffers = num_shm_buffers
self._buffers = [
# We use Channel directly as a buffer implementation as
# channel only allows to have 1 shared memory buffer.
Channel(writer, reader_and_node_list, typ)
for _ in range(num_shm_buffers)
]
# The next index to write from self._buffers.
self._next_write_index = 0
# The next index to read from self._buffers.
self._next_read_index = 0
def ensure_registered_as_writer(self):
"""
Check whether the process is a valid writer. This method must be idempotent.
"""
for buffer in self._buffers:
buffer.ensure_registered_as_writer()
def ensure_registered_as_reader(self):
"""
Check whether the process is a valid reader. This method must be idempotent.
"""
for buffer in self._buffers:
buffer.ensure_registered_as_reader()
def write(self, value: Any, timeout: Optional[float] = None) -> None:
"""Write a value to a channel.
If the next buffer is available, it returns immediately. If the next
buffer is not read by downstream consumers, it blocks until a buffer is
available to write. If a buffer is not available within timeout, it raises
RayChannelTimeoutError.
"""
self.ensure_registered_as_writer()
# A single channel is not supposed to read and write at the same time.
assert self._next_read_index == 0
self._buffers[self._next_write_index].write(value, timeout)
self._next_write_index += 1
self._next_write_index %= self._num_shm_buffers
def read(self, timeout: Optional[float] = None) -> Any:
"""Read a value from a channel.
If the next buffer is available, it returns immediately. If the next
buffer is not written by an upstream producer, it blocks until a buffer is
available to read. If a buffer is not available within timeout, it raises
RayChannelTimeoutError.
"""
self.ensure_registered_as_reader()
# A single channel is not supposed to read and write at the same time.
assert self._next_write_index == 0
output = self._buffers[self._next_read_index].read(timeout)
self._next_read_index += 1
self._next_read_index %= self._num_shm_buffers
return output
def release_buffer(self, timeout: Optional[float] = None):
"""Release the native buffer of the channel to allow the buffer to be reused for
future data.
If the next buffer is available, it returns immediately. If the next
buffer is not written by an upstream producer, it blocks until a buffer is
available to be released. If a buffer is not available within timeout, it raises
RayChannelTimeoutError.
"""
# A single channel is not supposed to read and write at the same time.
assert self._next_write_index == 0
self._buffers[self._next_read_index].release_buffer(timeout)
self._next_read_index += 1
self._next_read_index %= self._num_shm_buffers
def close(self) -> None:
for buffer in self._buffers:
buffer.close()
@property
def next_write_index(self):
# Testing only
return self._next_write_index
@property
def next_read_index(self):
# Testing only
return self._next_read_index
@PublicAPI(stability="alpha")
| BufferedSharedMemoryChannel |
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 1043,
"end": 1096
} | class ____:
""" crash """
name = NewAaaa
| Getattr |
python | plotly__plotly.py | tests/test_core/test_graph_objs/test_graph_objs.py | {
"start": 3273,
"end": 4545
} | class ____(TestCase):
def setUp(self):
self.layout = go.Layout(
width=1000,
title={"text": "the title", "font": {"size": 20}},
annotations=[{}, {}],
xaxis2={"range": [1, 2]},
)
def test_pop_valid_simple_prop(self):
self.assertEqual(self.layout.width, 1000)
self.assertEqual(self.layout.pop("width"), 1000)
self.assertIsNone(self.layout.width)
def test_pop_valid_compound_prop(self):
val = self.layout.title
self.assertEqual(self.layout.pop("title"), val)
self.assertEqual(self.layout.title, go.layout.Title())
def test_pop_valid_array_prop(self):
val = self.layout.annotations
self.assertEqual(self.layout.pop("annotations"), val)
self.assertEqual(self.layout.annotations, ())
def test_pop_valid_subplot_prop(self):
val = self.layout.xaxis2
self.assertEqual(self.layout.pop("xaxis2"), val)
self.assertEqual(self.layout.xaxis2, go.layout.XAxis())
def test_pop_invalid_prop_key_error(self):
with self.assertRaises(KeyError):
self.layout.pop("bogus")
def test_pop_invalid_prop_with_default(self):
self.assertEqual(self.layout.pop("bogus", 42), 42)
| TestPop |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/execution_tests/execution_plan_tests/test_host_run_worker.py | {
"start": 1079,
"end": 4335
} | class ____(ReconstructableJob):
def __new__(
cls,
repository,
pipeline_name,
op_selection=None,
asset_selection=None,
):
return super().__new__(
cls,
repository,
pipeline_name,
op_selection,
asset_selection,
)
def get_definition(self): # pyright: ignore[reportIncompatibleVariableOverride]
if os.getpid() == _explode_pid["pid"]:
raise Exception("Got the definition in the run worker process")
return super().get_definition()
def test_host_run_worker():
_explode_pid["pid"] = os.getpid() # pyright: ignore[reportArgumentType]
with dg.instance_for_test() as instance:
run_config = {
"ops": {"op_that_uses_adder_resource": {"inputs": {"number": {"value": 4}}}},
}
execution_plan = create_execution_plan(
job_with_resources,
run_config,
)
dagster_run = instance.create_run_for_job(
job_def=job_with_resources,
execution_plan=execution_plan,
run_config=run_config,
)
recon_job = dg.reconstructable(job_with_resources)
execute_run_host_mode(
ExplodingTestPipeline(recon_job.repository, recon_job.job_name),
dagster_run,
instance,
executor_defs=[dg.multiprocess_executor],
raise_on_error=True,
)
assert instance.get_run_by_id(dagster_run.run_id).status == DagsterRunStatus.SUCCESS # pyright: ignore[reportOptionalMemberAccess]
logs = instance.all_logs(dagster_run.run_id)
assert any(
e.is_dagster_event and "Executing steps using multiprocess executor" in e.message
for e in logs
)
@dg.executor(
name="custom_test_executor",
config_schema={},
)
def test_executor(_init_context):
return MultiprocessExecutor(
max_concurrent=4,
retries=RetryMode.DISABLED,
)
def test_custom_executor_fn():
_explode_pid["pid"] = os.getpid() # pyright: ignore[reportArgumentType]
with dg.instance_for_test() as instance:
run_config = {
"ops": {"op_that_uses_adder_resource": {"inputs": {"number": {"value": 4}}}},
}
execution_plan = create_execution_plan(
job_with_resources,
run_config,
)
dagster_run = instance.create_run_for_job(
job_def=job_with_resources,
execution_plan=execution_plan,
run_config=run_config,
)
recon_job = dg.reconstructable(job_with_resources)
execute_run_host_mode(
ExplodingTestPipeline(recon_job.repository, recon_job.job_name),
dagster_run,
instance,
executor_defs=[test_executor],
raise_on_error=True,
)
assert instance.get_run_by_id(dagster_run.run_id).status == DagsterRunStatus.SUCCESS # pyright: ignore[reportOptionalMemberAccess]
logs = instance.all_logs(dagster_run.run_id)
assert any(
e.is_dagster_event and "Executing steps using multiprocess executor" in e.message
for e in logs
)
| ExplodingTestPipeline |
python | simonw__sqlite-utils | sqlite_utils/utils.py | {
"start": 4649,
"end": 5545
} | class ____:
def __init__(self, wrapped, update):
self._wrapped = wrapped
self._update = update
def __iter__(self):
for line in self._wrapped:
self._update(len(line))
yield line
def read(self, size=-1):
data = self._wrapped.read(size)
self._update(len(data))
return data
@contextlib.contextmanager
def file_progress(file, silent=False, **kwargs):
if silent:
yield file
return
# file.fileno() throws an exception in our test suite
try:
fileno = file.fileno()
except io.UnsupportedOperation:
yield file
return
if fileno == 0: # 0 means stdin
yield file
else:
file_length = os.path.getsize(file.name)
with click.progressbar(length=file_length, **kwargs) as bar:
yield UpdateWrapper(file, bar.update)
| UpdateWrapper |
python | django__django | tests/get_earliest_or_latest/models.py | {
"start": 233,
"end": 401
} | class ____(models.Model):
name = models.CharField(max_length=30)
birthday = models.DateField()
# Note that this model doesn't have "get_latest_by" set.
| Person |
python | sympy__sympy | sympy/core/assumptions.py | {
"start": 22761,
"end": 23402
} | class ____(type):
def __init__(cls, *args, **kwargs):
msg = ("The ManagedProperties metaclass. "
"Basic does not use metaclasses any more")
sympy_deprecation_warning(msg,
deprecated_since_version="1.12",
active_deprecations_target='managedproperties')
# Here we still call this function in case someone is using
# ManagedProperties for something that is not a Basic subclass. For
# Basic subclasses this function is now called by __init_subclass__ and
# so this metaclass is not needed any more.
_prepare_class_assumptions(cls)
| ManagedProperties |
python | catalyst-team__catalyst | catalyst/contrib/utils/thresholds.py | {
"start": 256,
"end": 17009
} | class ____(str, enum.Enum):
"""Available threshold search strategies types."""
NOOP = noop = "noop"
MULTILABEL = multilabel = "multilabel"
MULTICLASS = multiclass = "multiclass"
def get_baseline_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN
) -> Tuple[float, List[float]]:
"""Returns baseline thresholds for multiclass/multilabel classification.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels,
numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
thresholds = [0.5] * num_classes
predictions = np.greater(scores, thresholds).astype(np.int32)
best_metric = objective(labels, predictions)
return best_metric, thresholds
def get_binary_threshold(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_thresholds: int = 100,
) -> Tuple[float, float]:
"""Finds best threshold for binary classification task
based on cross-validation estimates.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, ]
labels: ground truth labels, numpy array with shape [num_examples, ]
objective: callable function, metric which we want to maximize
num_thresholds: number of thresholds ot try for each class
Returns:
tuple with best found objective score and threshold
"""
thresholds = np.linspace(scores.min(), scores.max(), num=num_thresholds)
metric_values = []
for threshold in thresholds:
predictions = (scores >= threshold).astype(np.int32)
if np.sum(predictions) > 0:
metric_value = objective(labels, predictions)
metric_values.append(metric_value)
else:
metric_values.append(0.0)
if np.max(metric_values) == 0.0:
best_metric_value = 0.0
best_threshold = 1.0
else:
best_metric_value = metric_values[np.argmax(metric_values)]
best_threshold = thresholds[np.argmax(metric_values)]
return best_metric_value, best_threshold
def get_multiclass_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN
) -> Tuple[List[float], List[float]]:
"""Finds best thresholds for multiclass classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels, numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
metrics = [0.0] * num_classes
thresholds = [0.0] * num_classes
# score threshold -> classes with such score
classes_by_threshold = defaultdict(list)
for class_index in range(num_classes):
for score in np.unique(scores[:, class_index]):
classes_by_threshold[score].append(class_index)
for threshold in sorted(classes_by_threshold):
for class_index in classes_by_threshold[threshold]:
metric_value = objective(
labels[:, class_index], scores[:, class_index] >= threshold
)
if metric_value > metrics[class_index]:
metrics[class_index] = metric_value
thresholds[class_index] = threshold
return metrics, thresholds
def get_multilabel_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN
):
"""Finds best thresholds for multilabel classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels, numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = labels.shape[1]
metrics = [0.0] * num_classes
thresholds = [0.0] * num_classes
for class_index in range(num_classes):
best_metric, best_threshold = get_binary_threshold(
labels=labels[:, class_index],
scores=scores[:, class_index],
objective=objective,
)
metrics[class_index] = best_metric
thresholds[class_index] = best_threshold
return metrics, thresholds
def get_binary_threshold_cv(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_splits: int = 5,
num_repeats: int = 1,
random_state: int = 42,
):
"""Finds best threshold
for binary classification task based on cross-validation estimates.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, ]
labels: ground truth labels, numpy array with shape [num_examples, ]
objective: callable function, metric which we want to maximize
num_splits: number of splits to use for cross-validation
num_repeats: number of repeats to use for cross-validation
random_state: random state to use for cross-validation
Returns:
tuple with best found objective score and threshold
"""
rkf = RepeatedStratifiedKFold(
n_splits=num_splits, n_repeats=num_repeats, random_state=random_state
)
fold_metrics, fold_thresholds = [], []
for train_index, valid_index in rkf.split(labels, labels):
labels_train, labels_valid = labels[train_index], labels[valid_index]
scores_train, scores_valid = scores[train_index], scores[valid_index]
_, best_threshold = get_binary_threshold(
labels=labels_train, scores=scores_train, objective=objective
)
valid_predictions = (scores_valid >= best_threshold).astype(np.int32)
best_metric_value = objective(labels_valid, valid_predictions)
fold_metrics.append(best_metric_value)
fold_thresholds.append(best_threshold)
return np.mean(fold_metrics), np.mean(fold_thresholds)
def get_multilabel_thresholds_cv(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_splits: int = 5,
num_repeats: int = 1,
random_state: int = 42,
):
"""Finds best thresholds for multilabel classification task
based on cross-validation estimates.
Args:
scores: estimated per-class scores/probabilities predicted by the model,
numpy array with shape [num_examples, num_classes]
labels: ground truth labels, numpy array with shape [num_examples, num_classes]
objective: callable function, metric which we want to maximize
num_splits: number of splits to use for cross-validation
num_repeats: number of repeats to use for cross-validation
random_state: random state to use for cross-validation
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = labels.shape[1]
metrics = [0.0] * num_classes
thresholds = [0.0] * num_classes
for class_index in range(num_classes):
best_metric, best_threshold = get_binary_threshold_cv(
labels=labels[:, class_index],
scores=scores[:, class_index],
objective=objective,
num_splits=num_splits,
num_repeats=num_repeats,
random_state=random_state,
)
metrics[class_index] = best_metric
thresholds[class_index] = best_threshold
return metrics, thresholds
def get_thresholds_greedy(
scores: np.ndarray,
labels: np.ndarray,
score_fn: Callable,
num_iterations: int = 100,
num_thresholds: int = 100,
thresholds: np.ndarray = None,
patience: int = 3,
atol: float = 0.01,
) -> Tuple[float, List[float]]:
"""Finds best thresholds for classification task with brute-force algorithm.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
score_fn: callable function, based on (scores, labels, thresholds)
num_iterations: number of iteration for brute-force algorithm
num_thresholds: number of thresholds ot try for each class
thresholds: baseline thresholds, which we want to optimize
patience: maximum number of iteration before early stop exit
atol: minimum required improvement per iteration for early stop exit
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
if thresholds is None:
thresholds = [0.5] * num_classes
best_metric = score_fn(scores, labels, thresholds)
iteration_metrics = []
for i in range(num_iterations):
if len(iteration_metrics) >= patience:
if best_metric < iteration_metrics[i - patience] + atol:
break
for class_index in range(num_classes):
current_thresholds = thresholds.copy()
class_scores = []
class_thresholds = np.linspace(
scores[:, class_index].min(),
scores[:, class_index].max(),
num=num_thresholds,
)
for threshold in class_thresholds:
current_thresholds[class_index] = threshold
class_score = score_fn(scores, labels, current_thresholds)
class_scores.append(class_score)
best_class_score = np.max(class_scores)
best_score_index = np.argmax(class_scores)
if best_class_score > best_metric:
best_metric = best_class_score
thresholds[class_index] = class_thresholds[best_score_index]
iteration_metrics.append(best_metric)
return best_metric, thresholds
def _multilabel_score_fn(scores, labels, thresholds, objective):
predictions = np.greater(scores, thresholds).astype(np.int32)
return objective(labels, predictions)
def get_multilabel_thresholds_greedy(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_iterations: int = 100,
num_thresholds: int = 100,
thresholds: np.ndarray = None,
patience: int = 3,
atol: float = 0.01,
) -> Tuple[float, List[float]]:
"""Finds best thresholds
for multilabel classification task with brute-force algorithm.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
num_iterations: number of iteration for brute-force algorithm
num_thresholds: number of thresholds ot try for each class
thresholds: baseline thresholds, which we want to optimize
patience: maximum number of iteration before early stop exit
atol: minimum required improvement per iteration for early stop exit
Returns:
tuple with best found objective score and per-class thresholds
"""
best_metric, thresholds = get_thresholds_greedy(
scores=scores,
labels=labels,
score_fn=partial(_multilabel_score_fn, objective=objective),
num_iterations=num_iterations,
num_thresholds=num_thresholds,
thresholds=thresholds,
patience=patience,
atol=atol,
)
return best_metric, thresholds
def _multiclass_score_fn(scores, labels, thresholds, objective):
scores_copy = scores.copy()
scores_copy[np.less(scores, thresholds)] = 0
predictions = scores_copy.argmax(axis=1)
return objective(labels, predictions)
def get_multiclass_thresholds_greedy(
scores: np.ndarray,
labels: np.ndarray,
objective: METRIC_FN,
num_iterations: int = 100,
num_thresholds: int = 100,
thresholds: np.ndarray = None,
patience: int = 3,
atol: float = 0.01,
) -> Tuple[float, List[float]]:
"""Finds best thresholds
for multiclass classification task with brute-force algorithm.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
num_iterations: number of iteration for brute-force algorithm
num_thresholds: number of thresholds ot try for each class
thresholds: baseline thresholds, which we want to optimize
patience: maximum number of iteration before early stop exit
atol: minimum required improvement per iteration for early stop exit
Returns:
tuple with best found objective score and per-class thresholds
"""
best_metric, thresholds = get_thresholds_greedy(
scores=scores,
labels=labels,
score_fn=partial(_multiclass_score_fn, objective=objective),
num_iterations=num_iterations,
num_thresholds=num_thresholds,
thresholds=thresholds,
patience=patience,
atol=atol,
)
return best_metric, thresholds
def get_best_multilabel_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN
) -> Tuple[float, List[float]]:
"""Finds best thresholds for multilabel classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
best_metric, best_thresholds = 0.0, []
for baseline_thresholds_fn in [
get_baseline_thresholds,
get_multiclass_thresholds,
get_binary_threshold,
get_multilabel_thresholds,
]:
_, baseline_thresholds = baseline_thresholds_fn(
labels=labels, scores=scores, objective=objective
)
if isinstance(baseline_thresholds, (int, float)):
baseline_thresholds = [baseline_thresholds] * num_classes
metric_value, thresholds_value = get_multilabel_thresholds_greedy(
labels=labels,
scores=scores,
objective=objective,
thresholds=baseline_thresholds,
)
if metric_value > best_metric:
best_metric = metric_value
best_thresholds = thresholds_value
return best_metric, best_thresholds
def get_best_multiclass_thresholds(
scores: np.ndarray, labels: np.ndarray, objective: METRIC_FN
) -> Tuple[float, List[float]]:
"""Finds best thresholds for multiclass classification task.
Args:
scores: estimated per-class scores/probabilities predicted by the model
labels: ground truth labels
objective: callable function, metric which we want to maximize
Returns:
tuple with best found objective score and per-class thresholds
"""
num_classes = scores.shape[1]
best_metric, best_thresholds = 0.0, []
labels_onehot = np.zeros((labels.size, labels.max() + 1))
labels_onehot[np.arange(labels.size), labels] = 1
for baseline_thresholds_fn in [
get_baseline_thresholds,
get_multiclass_thresholds,
get_binary_threshold,
get_multilabel_thresholds,
]:
_, baseline_thresholds = baseline_thresholds_fn(
labels=labels_onehot, scores=scores, objective=objective
)
if isinstance(baseline_thresholds, (int, float)):
baseline_thresholds = [baseline_thresholds] * num_classes
metric_value, thresholds_value = get_multiclass_thresholds_greedy(
labels=labels,
scores=scores,
objective=objective,
thresholds=baseline_thresholds,
)
if metric_value > best_metric:
best_metric = metric_value
best_thresholds = thresholds_value
return best_metric, best_thresholds
__all__ = [
"get_baseline_thresholds",
"get_binary_threshold",
"get_multiclass_thresholds",
"get_multilabel_thresholds",
"get_binary_threshold_cv",
"get_multilabel_thresholds_cv",
"get_thresholds_greedy",
"get_multilabel_thresholds_greedy",
"get_multiclass_thresholds_greedy",
"get_best_multilabel_thresholds",
"get_best_multiclass_thresholds",
]
| ThresholdMode |
python | wandb__wandb | wandb/sdk/artifacts/artifact_manifest.py | {
"start": 430,
"end": 2716
} | class ____(ArtifactsBase, ABC):
# Note: we can't name this "version" since it conflicts with the prior
# `version()` classmethod.
manifest_version: Annotated[Any, Field(repr=False)]
entries: Dict[str, ArtifactManifestEntry] = Field(default_factory=dict) # noqa: UP006
storage_policy: Annotated[StoragePolicy, Field(exclude=True, repr=False)]
@classmethod
def version(cls) -> int:
return cls.model_fields["manifest_version"].default
@classmethod
@abstractmethod
def from_manifest_json(cls, manifest_json: dict[str, Any]) -> ArtifactManifest:
if (version := manifest_json.get("version")) is None:
raise ValueError("Invalid manifest format. Must contain version field.")
for sub in cls.__subclasses__():
if sub.version() == version:
return sub.from_manifest_json(manifest_json)
raise ValueError("Invalid manifest version.")
def __len__(self) -> int:
return len(self.entries)
@abstractmethod
def to_manifest_json(self) -> dict[str, Any]:
raise NotImplementedError
@abstractmethod
def digest(self) -> HexMD5:
raise NotImplementedError
@abstractmethod
def size(self) -> int:
raise NotImplementedError
def add_entry(self, entry: ArtifactManifestEntry, overwrite: bool = False) -> None:
if (
(not overwrite)
and (old_entry := self.entries.get(entry.path))
and (entry.digest != old_entry.digest)
):
raise ValueError(f"Cannot add the same path twice: {entry.path!r}")
self.entries[entry.path] = entry
def remove_entry(self, entry: ArtifactManifestEntry) -> None:
try:
del self.entries[entry.path]
except LookupError:
raise FileNotFoundError(f"Cannot remove missing entry: {entry.path!r}")
def get_entry_by_path(self, path: str) -> ArtifactManifestEntry | None:
return self.entries.get(path)
def get_entries_in_directory(self, directory: str) -> list[ArtifactManifestEntry]:
# entry keys (paths) use forward slash even for windows
dir_prefix = f"{directory}/"
return [obj for key, obj in self.entries.items() if key.startswith(dir_prefix)]
| ArtifactManifest |
python | numba__numba | numba/tests/test_target_extension.py | {
"start": 23540,
"end": 28255
} | class ____(TestCase):
"""In this use case the CPU compilation pipeline is extended with a new
compilation pass that runs just prior to lowering. The pass looks for
function calls and when it finds one it sees if there's a DPU function
available that is a valid overload for the function call. If there is one
then it swaps the CPU implementation out for a DPU implementation. This
producing an "offload" effect.
"""
def test_basic_offload(self):
_DEBUG = False
# This is the DPU function for sin, it'll return a pi-like constant
@overload(np.sin, target="dpu")
def ol_np_sin_DPU(x):
def dpu_sin_impl(x):
return 314159.0
return dpu_sin_impl
# Check the DPU reports the correct overload value
@djit(nopython=True)
def foo(x):
return np.sin(x)
self.assertPreciseEqual(foo(5), 314159.0)
# Check the CPU call is correct
@njit
def foo(x):
return np.sin(x)
self.assertPreciseEqual(foo(5), np.sin(5))
@register_pass(mutates_CFG=False, analysis_only=False)
class DispatcherSwitcher(FunctionPass):
_name = "DispatcherSwitcher"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
func_ir = state.func_ir
mutated = False
for blk in func_ir.blocks.values():
# find the assignment nodes in the block and walk them, if
# there's a DPU version then swap out for a call to that
for call in blk.find_exprs("call"):
function = state.typemap[call.func.name]
tname = "dpu"
# Note: `target_override` context driven compilation can
# be done here, the DPU target is in use.
with target_override(tname):
try:
sig = function.get_call_type(
state.typingctx,
state.calltypes[call].args,
{},
)
disp = resolve_dispatcher_from_str(tname)
# force compile check
hw_ctx = disp.targetdescr.target_context
hw_ctx.get_function(function, sig)
except Exception as e:
if _DEBUG:
msg = (
f"Failed to find and compile an "
f"overload for {function} for {tname} "
f"due to {e}"
)
print(msg)
continue
# This is a necessary hack at present so as to
# generate code into the same library. I.e. the DPU
# target is going to do code gen into the CPUs lib.
hw_ctx._codelib_stack = (
state.targetctx._codelib_stack
)
# All is good, so switch IR node for one targeting
# this target. Should generate this, but for now
# just mutate as:
# ir.Expr.call(call.func, call.args, call.kws,
# call.loc, target='dpu')
call.target = tname
mutated = True
# return True if the IR was mutated, False if not.
return mutated
# DPU compiler pipeline, compiles with offload to the DPU target
class DPUOffloadCompiler(CompilerBase):
def define_pipelines(self):
pm = DefaultPassBuilder.define_nopython_pipeline(self.state)
pm.add_pass_after(DispatcherSwitcher, PreLowerStripPhis)
pm.finalize()
return [pm]
# Now compile for CPU, but with the DispatcherSwitcher pass in place
# that switches CPU calls for DPU calls
@njit(pipeline_class=DPUOffloadCompiler)
def foo(x):
return np.sin(x), np.cos(x) # np.sin is DPU, np.cos is CPU
self.assertPreciseEqual(foo(5), (314159.0, np.cos(5)))
if __name__ == "__main__":
unittest.main()
| TestTargetOffload |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_crop.py | {
"start": 584,
"end": 10553
} | class ____(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly crops images during training.
During training, this layer will randomly choose a location to crop images
down to a target size. The layer will crop all the images in the same batch
to the same cropping location.
At inference time, and during training if an input image is smaller than the
target size, the input will be resized and cropped so as to return the
largest possible window in the image that matches the target aspect ratio.
If you need to apply random cropping at inference time, set `training` to
True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(
self, height, width, seed=None, data_format=None, name=None, **kwargs
):
super().__init__(name=name, **kwargs)
self.height = height
self.width = width
self.seed = (
seed if seed is not None else backend.random.make_default_seed()
)
self.generator = SeedGenerator(seed)
self.data_format = backend.standardize_data_format(data_format)
if self.data_format == "channels_first":
self.height_axis = -2
self.width_axis = -1
elif self.data_format == "channels_last":
self.height_axis = -3
self.width_axis = -2
self.supports_masking = False
self.supports_jit = False
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
def get_random_transformation(self, data, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
if isinstance(data, dict):
input_shape = self.backend.shape(data["images"])
else:
input_shape = self.backend.shape(data)
input_height, input_width = (
input_shape[self.height_axis],
input_shape[self.width_axis],
)
if input_height is None or input_width is None:
raise ValueError(
"RandomCrop requires the input to have a fully defined "
f"height and width. Received: images.shape={input_shape}"
)
if training and input_height > self.height and input_width > self.width:
h_start = self.backend.cast(
self.backend.random.uniform(
(),
0,
maxval=float(input_height - self.height + 1),
seed=seed,
),
"int32",
)
w_start = self.backend.cast(
self.backend.random.uniform(
(),
0,
maxval=float(input_width - self.width + 1),
seed=seed,
),
"int32",
)
else:
crop_height = int(float(input_width * self.height) / self.width)
crop_height = max(min(input_height, crop_height), 1)
crop_width = int(float(input_height * self.width) / self.height)
crop_width = max(min(input_width, crop_width), 1)
h_start = int(float(input_height - crop_height) / 2)
w_start = int(float(input_width - crop_width) / 2)
return h_start, w_start
def transform_images(self, images, transformation, training=True):
if training:
images = self.backend.cast(images, self.compute_dtype)
crop_box_hstart, crop_box_wstart = transformation
crop_height = self.height
crop_width = self.width
if self.data_format == "channels_last":
if len(images.shape) == 4:
images = images[
:,
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
:,
]
else:
images = images[
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
:,
]
else:
if len(images.shape) == 4:
images = images[
:,
:,
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
]
else:
images = images[
:,
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
]
shape = self.backend.shape(images)
new_height = shape[self.height_axis]
new_width = shape[self.width_axis]
if (
not isinstance(new_height, int)
or not isinstance(new_width, int)
or new_height != self.height
or new_width != self.width
):
# Resize images if size mismatch or
# if size mismatch cannot be determined
# (in the case of a TF dynamic shape).
images = self.backend.image.resize(
images,
size=(self.height, self.width),
data_format=self.data_format,
)
# Resize may have upcasted the outputs
images = self.backend.cast(images, self.compute_dtype)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
"""
bounding_boxes = {
"boxes": (batch, num_boxes, 4), # left-top-right-bottom (xyxy)
"labels": (batch, num_boxes, num_classes),
}
or
bounding_boxes = {
"boxes": (num_boxes, 4),
"labels": (num_boxes, num_classes),
}
"""
if training:
h_start, w_start = transformation
if not self.backend.is_tensor(bounding_boxes["boxes"]):
bounding_boxes = densify_bounding_boxes(
bounding_boxes, backend=self.backend
)
boxes = bounding_boxes["boxes"]
# Convert to a standard xyxy as operations are done xyxy by default.
boxes = convert_format(
boxes=boxes,
source=self.bounding_box_format,
target="xyxy",
height=self.height,
width=self.width,
)
h_start = self.backend.cast(h_start, boxes.dtype)
w_start = self.backend.cast(w_start, boxes.dtype)
if len(self.backend.shape(boxes)) == 3:
boxes = self.backend.numpy.stack(
[
self.backend.numpy.maximum(boxes[:, :, 0] - h_start, 0),
self.backend.numpy.maximum(boxes[:, :, 1] - w_start, 0),
self.backend.numpy.maximum(boxes[:, :, 2] - h_start, 0),
self.backend.numpy.maximum(boxes[:, :, 3] - w_start, 0),
],
axis=-1,
)
else:
boxes = self.backend.numpy.stack(
[
self.backend.numpy.maximum(boxes[:, 0] - h_start, 0),
self.backend.numpy.maximum(boxes[:, 1] - w_start, 0),
self.backend.numpy.maximum(boxes[:, 2] - h_start, 0),
self.backend.numpy.maximum(boxes[:, 3] - w_start, 0),
],
axis=-1,
)
# Convert to user defined bounding box format
boxes = convert_format(
boxes=boxes,
source="xyxy",
target=self.bounding_box_format,
height=self.height,
width=self.width,
)
return {
"boxes": boxes,
"labels": bounding_boxes["labels"],
}
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(segmentation_masks, transformation)
def compute_output_shape(self, input_shape, *args, **kwargs):
input_shape = list(input_shape)
input_shape[self.height_axis] = self.height
input_shape[self.width_axis] = self.width
return tuple(input_shape)
def get_config(self):
config = super().get_config()
config.update(
{
"height": self.height,
"width": self.width,
"seed": self.seed,
"data_format": self.data_format,
}
)
return config
| RandomCrop |
python | pypa__warehouse | tests/unit/organizations/test_models.py | {
"start": 1352,
"end": 2032
} | class ____:
def test_traversal_finds(self, db_request):
organization_application = DBOrganizationApplicationFactory.create()
_organization_application = OrganizationApplicationFactory(db_request)
assert (
_organization_application[organization_application.id]
== organization_application
)
def test_traversal_cant_find(self, db_request):
DBOrganizationApplicationFactory.create()
_organization_application = OrganizationApplicationFactory(db_request)
with pytest.raises(KeyError):
_organization_application["deadbeef-dead-beef-dead-beefdeadbeef"]
| TestOrganizationApplicationFactory |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modeling_sam3_tracker.py | {
"start": 34745,
"end": 52750
} | class ____(Sam3TrackerPreTrainedModel):
input_modalities = ("image", "text")
_can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam3TrackerTwoWayAttentionBlock, index=2)}
_keys_to_ignore_on_load_unexpected = [
r"^detector_model.",
r"^memory_.*",
r"^mask_downsample.*",
r"^object_pointer_proj.*",
r"^temporal_positional_encoding_projection_layer.*",
"no_memory_positional_encoding",
"no_object_pointer",
"occlusion_spatial_embedding_parameter",
]
_checkpoint_conversion_mapping = {
r"tracker_model.(.+)": r"\1", # the regex allows to remove the prefix, and add it back in revert mode
"detector_model.vision_encoder.backbone.": "vision_encoder.backbone.",
"tracker_neck.": "vision_encoder.neck.",
}
def __init__(self, config: Sam3TrackerConfig):
# loading from a sam3_video config
if hasattr(config, "tracker_config") and config.tracker_config is not None:
if isinstance(config.tracker_config, dict):
config.tracker_config = Sam3TrackerConfig(**config.tracker_config)
config = config.tracker_config
super().__init__(config)
self.shared_image_embedding = Sam3TrackerPositionalEmbedding(config.prompt_encoder_config)
self.vision_encoder = AutoModel.from_config(config.vision_config)
self.prompt_encoder = Sam3TrackerPromptEncoder(config.prompt_encoder_config)
# The module using it is not a PreTrainedModel subclass so we need this
config.mask_decoder_config._attn_implementation = config._attn_implementation
self.mask_decoder = Sam3TrackerMaskDecoder(config.mask_decoder_config)
self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes
# a single token to indicate no memory embedding from previous frames
self.hidden_dim = config.vision_config.fpn_hidden_size
self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
self.post_init()
def get_input_embeddings(self):
return self.vision_encoder.get_input_embeddings()
def get_image_wide_positional_embeddings(self) -> torch.Tensor:
size = self.prompt_encoder.image_embedding_size
target_device = self.shared_image_embedding.positional_embedding.device
target_dtype = self.shared_image_embedding.positional_embedding.dtype
grid = torch.ones(size, device=target_device, dtype=target_dtype)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / size[0]
x_embed = x_embed / size[1]
positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))
return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width
@torch.no_grad()
def get_image_embeddings(
self,
pixel_values: torch.FloatTensor,
**kwargs: Unpack[TransformersKwargs],
) -> list[torch.Tensor]:
r"""
Returns the image embeddings by passing the pixel values through the vision encoder.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Input pixel values
"""
batch_size = pixel_values.shape[0]
feature_maps, _, _, _ = self.get_image_features(pixel_values, **kwargs)
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
return image_embeddings
@torch.no_grad()
def get_prompt_embeddings(
self,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
):
r"""
Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.
Args:
input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
Optional input points for the prompt encoder. The padding of the point is automatically done by the
processor. `point_batch_size` refers to the number of masks that we want the model to predict per
point. The model will output `point_batch_size` times 3 masks in total.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
processor, or can be fed by the user.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):
Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
processor. users can also pass manually the input boxes.
input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):
Optional input masks for the prompt encoder.
"""
prompt_output = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
return prompt_output
@check_model_inputs()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
multimask_output: bool = True,
attention_similarity: Optional[torch.FloatTensor] = None,
target_embedding: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Sam3TrackerImageSegmentationOutput:
r"""
input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much
better results. The points can be obtained by passing a list of list of list to the processor that will
create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
coordinates of the point. If a different number of points is passed either for each image, or for each
mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
computation of the embedding will be skipped for these points using the labels.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
official implementation, there are 3 types of labels
- `1`: the point is a point that contains the object of interest
- `0`: the point is a point that does not contain the object of interest
- `-1`: the point corresponds to the background
We added the label:
- `-10`: the point is a padding point, thus should be ignored by the prompt encoder
The padding labels should be automatically done by the processor.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to
much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
In the order (`x1`, `y1`, `x2`, `y2`):
- `x1`: the x coordinate of the top left point of the input box
- `y1`: the y coordinate of the top left point of the input box
- `x2`: the x coordinate of the bottom right point of the input box
- `y2`: the y coordinate of the bottom right point of the input box
input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be
manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
multimask_output (`bool`, *optional*):
In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
"best" mask, by specifying `multimask_output=False`.
attention_similarity (`torch.FloatTensor`, *optional*):
Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
target_embedding (`torch.FloatTensor`, *optional*):
Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoModel, AutoProcessor
>>> model = AutoModel.from_pretrained("danelcsb/sam3_tracker.1_hiera_tiny")
>>> processor = AutoProcessor.from_pretrained("danelcsb/sam3_tracker.1_hiera_tiny")
>>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
>>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
>>> input_points = [[[400, 650]]] # 2D location of a window on the car
>>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")
>>> # Get segmentation mask
>>> outputs = model(**inputs)
>>> # Postprocess masks
>>> masks = processor.post_process_masks(
... outputs.pred_masks, inputs["original_sizes"]
... )
```
"""
if not ((pixel_values is None) ^ (image_embeddings is None)):
raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")
if input_points is not None and input_boxes is not None:
if input_points.shape[1] != input_boxes.shape[1]:
raise ValueError(
f"You should provide as many bounding boxes as input points per box. Got {input_points.shape[1]} and {input_boxes.shape[1]}."
)
image_positional_embeddings = self.get_image_wide_positional_embeddings()
# repeat with batch size
batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
vision_attentions = None
vision_hidden_states = None
if pixel_values is not None:
feature_maps, _, vision_hidden_states, vision_attentions = self.get_image_features(
pixel_values,
**kwargs,
)
# add no memory embedding to the last feature map
feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
# reshape feature maps to the same shape as the backbone feature sizes
image_embeddings = [
feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
]
if input_points is not None and input_labels is None:
input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
if input_points is None and input_boxes is None:
# If no points are provide, pad with an empty point (with label -1)
input_points = torch.zeros(
batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
)
input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)
if input_masks is not None:
# If mask_inputs is provided, downsize it into low-res mask input if needed
# and feed it as a dense mask prompt into the SAM mask encoder
if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
input_masks = F.interpolate(
input_masks.float(),
size=self.prompt_encoder.mask_input_size,
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
).to(input_masks.dtype)
sparse_embeddings, dense_embeddings = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
low_res_multimasks, iou_scores, _, object_score_logits = self.mask_decoder(
image_embeddings=image_embeddings[-1],
image_positional_embeddings=image_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
high_resolution_features=image_embeddings[:-1],
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
return Sam3TrackerImageSegmentationOutput(
iou_scores=iou_scores,
pred_masks=low_res_multimasks,
object_score_logits=object_score_logits,
image_embeddings=image_embeddings,
vision_hidden_states=vision_hidden_states,
vision_attentions=vision_attentions,
)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[
list[torch.Tensor],
list[torch.Tensor],
Optional[tuple[torch.FloatTensor, ...]],
Optional[tuple[torch.FloatTensor, ...]],
]:
r"""
Extract and preprocess image features using the vision encoder.
Args:
pixel_values (`torch.FloatTensor`):
Input pixel values of shape `(batch_size, num_channels, height, width)`.
Returns:
`tuple`: A tuple containing:
- feature_maps (`list[torch.Tensor]`): List of feature maps from different levels.
- feature_maps_position_embeddings (`list[torch.Tensor]`): List of positional embeddings for each feature level.
- vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Hidden states from the vision encoder.
- vision_attentions (`tuple[torch.FloatTensor]`, *optional*): Attention weights from the vision encoder.
"""
vision_outputs: Sam3TrackerVisionEncoderOutput = self.vision_encoder(
pixel_values,
**kwargs,
)
feature_maps = vision_outputs.fpn_hidden_states
feature_maps_position_embeddings = vision_outputs.fpn_position_encoding
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
feature_maps = list(feature_maps)
feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0])
feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1])
# flatten NxCxHxW to HWxNxC
feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
feature_maps_position_embeddings = [
feature_map_position_embedding.flatten(2).permute(2, 0, 1)
for feature_map_position_embedding in feature_maps_position_embeddings
]
return feature_maps, feature_maps_position_embeddings, vision_outputs.hidden_states, vision_outputs.attentions
__all__ = ["Sam3TrackerModel", "Sam3TrackerPreTrainedModel"]
| Sam3TrackerModel |
python | astropy__astropy | astropy/table/index.py | {
"start": 11051,
"end": 11166
} | class ____(ValueError):
"""
Indicates that a given index cannot handle the supplied query.
"""
| QueryError |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 1884,
"end": 1973
} | class ____(ModelShow1_plain):
field2 = models.CharField(max_length=30)
| ModelShow2_plain |
python | pyparsing__pyparsing | examples/adventureEngine.py | {
"start": 10782,
"end": 11069
} | class ____(Command):
def __init__(self, quals):
super().__init__("QUIT", "quitting")
@staticmethod
def help_description():
return "QUIT or Q - ends the game"
def _do_command(self, player):
print("Ok....")
player.gameOver = True
| QuitCommand |
python | cherrypy__cherrypy | cherrypy/lib/httputil.py | {
"start": 13649,
"end": 14572
} | class ____(jaraco.collections.KeyTransformingDict):
"""A case-insensitive dict subclass.
Each key is changed on entry to title case.
"""
@staticmethod
def transform_key(key):
"""Title-case an HTTP header name."""
if key is None:
# TODO(#1830): why?
return 'None'
return key.title()
# TEXT = <any OCTET except CTLs, but including LWS>
#
# A CRLF is allowed in the definition of TEXT only as part of a header
# field continuation. It is expected that the folding LWS will be
# replaced with a single SP before interpretation of the TEXT value."
if str == bytes:
header_translate_table = ''.join([chr(i) for i in range(256)])
header_translate_deletechars = ''.join([chr(i) for i in range(32)]) + chr(
127,
)
else:
header_translate_table = None
header_translate_deletechars = bytes(range(32)) + bytes([127])
| CaseInsensitiveDict |
python | django__django | tests/delete_regress/models.py | {
"start": 971,
"end": 1157
} | class ____(models.Model):
child = models.ForeignKey(Child, models.CASCADE)
toy = models.ForeignKey(Toy, models.CASCADE)
date = models.DateField(db_column="date_col")
| PlayedWith |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/comms.py | {
"start": 5768,
"end": 8848
} | class ____(MetaQObjectHasTraits(
'NewBase', (LoggingConfigurable, SuperQObject), {})):
"""
Comm base class
"""
sig_is_closing = QtCore.Signal(object)
def __init__(self, target_name, kernel_client, comm_id=None,
msg_callback=None, close_callback=None):
"""
Create a new comm. Must call open to use.
"""
super().__init__(target_name=target_name)
self.target_name = target_name
self.kernel_client = kernel_client
if comm_id is None:
comm_id = uuid.uuid1().hex
self.comm_id = comm_id
self._msg_callback = msg_callback
self._close_callback = close_callback
self._send_channel = self.kernel_client.shell_channel
def _send_msg(self, msg_type, content, data, metadata, buffers):
"""
Send a message on the shell channel.
"""
if data is None:
data = {}
if content is None:
content = {}
content['comm_id'] = self.comm_id
content['data'] = data
msg = self.kernel_client.session.msg(
msg_type, content, metadata=metadata)
if buffers:
msg['buffers'] = buffers
return self._send_channel.send(msg)
# methods for sending messages
def open(self, data=None, metadata=None, buffers=None):
"""Open the kernel-side version of this comm"""
return self._send_msg(
'comm_open', {'target_name': self.target_name},
data, metadata, buffers)
def send(self, data=None, metadata=None, buffers=None):
"""Send a message to the kernel-side version of this comm"""
return self._send_msg(
'comm_msg', {}, data, metadata, buffers)
def close(self, data=None, metadata=None, buffers=None):
"""Close the kernel-side version of this comm"""
self.sig_is_closing.emit(self)
return self._send_msg(
'comm_close', {}, data, metadata, buffers)
# methods for registering callbacks for incoming messages
def on_msg(self, callback):
"""Register a callback for comm_msg
Will be called with the `data` of any comm_msg messages.
Call `on_msg(None)` to disable an existing callback.
"""
self._msg_callback = callback
def on_close(self, callback):
"""Register a callback for comm_close
Will be called with the `data` of the close message.
Call `on_close(None)` to disable an existing callback.
"""
self._close_callback = callback
# methods for handling incoming messages
def handle_msg(self, msg):
"""Handle a comm_msg message"""
self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
if self._msg_callback:
return self._msg_callback(msg)
def handle_close(self, msg):
"""Handle a comm_close message"""
self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
if self._close_callback:
return self._close_callback(msg)
__all__ = ['CommManager']
| Comm |
python | neetcode-gh__leetcode | python/0080-remove-duplicates-from-sorted-array-ii.py | {
"start": 0,
"end": 407
} | class ____:
def removeDuplicates(self, nums: List[int]) -> int:
l, r = 0, 0
while r < len(nums):
count = 1
while r + 1 < len(nums) and nums[r] == nums[r + 1]:
r += 1
count += 1
for i in range(min(2, count)):
nums[l] = nums[r]
l += 1
r += 1
return l
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py | {
"start": 21761,
"end": 24706
} | class ____(GoogleCloudBaseOperator):
"""
Gets the latest state of a long-running operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceGetOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_operation_get_template_fields]
template_fields: Sequence[str] = (
"operation_name",
"gcp_conn_id",
"google_impersonation_chain",
)
# [END gcp_transfer_operation_get_template_fields]
operator_extra_links = (CloudStorageTransferDetailsLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context: Context) -> dict:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
operation = hook.get_transfer_operation(operation_name=self.operation_name)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferDetailsLink.persist(
context=context,
project_id=project_id,
operation_name=self.operation_name,
)
return operation
| CloudDataTransferServiceGetOperationOperator |
python | kamyu104__LeetCode-Solutions | Python/longest-uncommon-subsequence-ii.py | {
"start": 35,
"end": 825
} | class ____(object):
def findLUSlength(self, strs):
"""
:type strs: List[str]
:rtype: int
"""
def isSubsequence(a, b):
i = 0
for j in xrange(len(b)):
if i >= len(a):
break
if a[i] == b[j]:
i += 1
return i == len(a)
strs.sort(key=len, reverse=True)
for i in xrange(len(strs)):
all_of = True
for j in xrange(len(strs)):
if len(strs[j]) < len(strs[i]):
break
if i != j and isSubsequence(strs[i], strs[j]):
all_of = False
break
if all_of:
return len(strs[i])
return -1
| Solution |
python | ZoranPandovski__al-go-rithms | data_structures/Graphs/graph/Python/floyd_warshall.py | {
"start": 147,
"end": 1520
} | class ____:
# Constructor
def __init__(self, vertices, directed=True):
# default dictionary to store graph
self.graph = defaultdict(list)
# Number of vertices
self.V = vertices
# Is a directed graph?
self.directed = directed
# Initialize adjacency matrix
for i in range(self.V):
self.graph[i] = [INF] * self.V
self.add_edge(i, i, 0)
# Add an edge from vertex u to vertex v with weight w
# if it's a undirected graphs, add an edge from v to u too
def add_edge(self, u, v, w):
self.graph[u][v] = w
if not self.directed:
self.graph[v][u] = w
# Solves the lowest distance between each vertex pair
def solve(self):
for k in range(self.V):
for i in range(self.V):
for j in range(self.V):
if self.graph[i][k] + self.graph[k][j] < self.graph[i][j]:
self.graph[i][j] = self.graph[i][k] + self.graph[k][j]
def print_matrix(self):
for i in range(self.V):
print(self.graph[i])
def main():
g = Graph(4)
g.add_edge(0, 3, 10)
g.add_edge(2, 3, 1)
g.add_edge(0, 1, 5)
g.add_edge(1, 2, 3)
g.add_edge(3, 1, 7)
g.add_edge(2, 0, 9)
g.solve()
g.print_matrix()
if __name__ == '__main__':
main()
| Graph |
python | jazzband__django-oauth-toolkit | tests/migrations/0003_basetestapplication_post_logout_redirect_uris_and_more.py | {
"start": 158,
"end": 853
} | class ____(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.OAUTH2_PROVIDER_ID_TOKEN_MODEL),
("tests", "0002_swapped_models"),
]
operations = [
migrations.AddField(
model_name="basetestapplication",
name="post_logout_redirect_uris",
field=models.TextField(blank=True, help_text="Allowed Post Logout URIs list, space separated"),
),
migrations.AddField(
model_name="sampleapplication",
name="post_logout_redirect_uris",
field=models.TextField(blank=True, help_text="Allowed Post Logout URIs list, space separated"),
),
]
| Migration |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_parameters.py | {
"start": 4299,
"end": 7043
} | class ____(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology.
Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls: type[Cosmology]):
"""Test Parameter ``Ode0`` on the class."""
Ode0 = cosmo_cls.parameters.get(
"Ode0", cosmo_cls._derived_parameters.get("Ode0")
)
assert isinstance(Ode0, Parameter)
assert "Omega dark energy" in Ode0.__doc__
if issubclass(cosmo_cls, FlatFLRWMixin):
assert Ode0.default == 0
else:
assert Ode0.default is MISSING
def test_Parameter_Ode0_validation(
self, cosmo_cls: type[Cosmology], cosmo: Cosmology
):
"""Test Parameter ``Ode0`` validation."""
Ode0 = cosmo_cls.parameters.get(
"Ode0", cosmo_cls._derived_parameters.get("Ode0")
)
assert Ode0.validate(cosmo, 1.1) == 1.1
assert Ode0.validate(cosmo, 10 * u.one) == 10.0
with pytest.raises(TypeError, match="only dimensionless"):
Ode0.validate(cosmo, 10 * u.km)
def test_Ode0(self, cosmo: Cosmology):
"""Test Parameter ``Ode0`` validation."""
# if Ode0 is a parameter, test its value
assert cosmo.Ode0 is cosmo.__dict__["Ode0"]
assert cosmo.Ode0 == self._cls_args["Ode0"]
assert isinstance(cosmo.Ode0, float)
def test_init_Ode0(self, cosmo_cls: type[Cosmology], ba: BoundArguments):
"""Test initialization for values of ``Ode0``."""
# test that it works with units
ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# also without units
ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# Setting param to 0 respects that. Note this test uses ``Ode()``.
ba.arguments["Ode0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert_quantity_allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert_quantity_allclose(cosmo.Ode(1), 0)
# Must be dimensionless or have no units. Errors otherwise.
ba.arguments["Ode0"] = 10 * u.km
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls(*ba.args, **ba.kwargs)
# =============================================================================
| ParameterOde0TestMixin |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/dag_object_graph.py | {
"start": 895,
"end": 1176
} | class ____(tf.Module):
def __init__(self):
super(Child, self).__init__()
self.my_variable = tf.Variable(3.)
# Creates a dag object graph.
# There is only one instance of `Child`, but it is reachable via two names.
# Thus, self.my_variable is reachable via two paths.
| Child |
python | ray-project__ray | python/ray/tune/registry.py | {
"start": 8823,
"end": 9426
} | class ____:
def __init__(self):
self.to_flush = {}
self.references = {}
def put(self, k, v):
self.to_flush[k] = v
if ray.is_initialized():
self.flush()
def get(self, k):
if not ray.is_initialized():
return self.to_flush[k]
return ray.get(self.references[k])
def flush(self):
for k, v in self.to_flush.items():
if isinstance(v, ray.ObjectRef):
self.references[k] = v
else:
self.references[k] = ray.put(v)
self.to_flush.clear()
| _ParameterRegistry |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 15538,
"end": 16199
} | class ____(generics.ListCreateAPIView):
queryset = TwoFieldModel.objects.all()
renderer_classes = (renderers.BrowsableAPIRenderer, renderers.JSONRenderer)
def get_serializer_class(self):
if self.request.method == 'POST':
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = ('field_b',)
else:
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = '__all__'
return DynamicSerializer
| DynamicSerializerView |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 11549,
"end": 11622
} | class ____(_NumcodecsChecksumCodec, codec_name="adler32"):
pass
| Adler32 |
python | numba__numba | numba/core/utils.py | {
"start": 22358,
"end": 22535
} | class ____(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, _lazy_pformat):
return str(obj)
return super().default(obj)
| _LazyJSONEncoder |
python | huggingface__transformers | src/transformers/models/d_fine/modular_d_fine.py | {
"start": 40754,
"end": 42128
} | class ____(RTDetrModel):
def __init__(self, config: DFineConfig):
super().__init__(config)
del self.decoder_input_proj
self.encoder = DFineHybridEncoder(config=config)
num_backbone_outs = len(config.decoder_in_channels)
decoder_input_proj = []
in_channels = config.decoder_in_channels[-1]
for _ in range(num_backbone_outs):
if config.hidden_size == config.decoder_in_channels[-1]:
decoder_input_proj.append(nn.Identity())
else:
conv = nn.Conv2d(in_channels, config.d_model, kernel_size=1, bias=False)
batchnorm = nn.BatchNorm2d(config.d_model, config.batch_norm_eps)
decoder_input_proj.append(nn.Sequential(conv, batchnorm))
for _ in range(config.num_feature_levels - num_backbone_outs):
if config.hidden_size == config.decoder_in_channels[-1]:
decoder_input_proj.append(nn.Identity())
else:
conv = nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1, bias=False)
batchnorm = nn.BatchNorm2d(config.d_model, config.batch_norm_eps)
decoder_input_proj.append(nn.Sequential(conv, batchnorm))
self.decoder_input_proj = nn.ModuleList(decoder_input_proj)
self.decoder = DFineDecoder(config)
| DFineModel |
python | getsentry__sentry | src/sentry/backup/crypto.py | {
"start": 12167,
"end": 12438
} | class ____:
"""
An Encryptor and Decryptor that use paired public and private keys, respectively.
"""
def __init__(self, encryptor: Encryptor, decryptor: Decryptor):
self.encryptor = encryptor
self.decryptor = decryptor
| EncryptorDecryptorPair |
python | ipython__ipython | IPython/terminal/embed.py | {
"start": 651,
"end": 922
} | class ____(Exception):pass
# kept for backward compatibility as IPython 6 was released with
# the typo. See https://github.com/ipython/ipython/pull/10706
KillEmbeded = KillEmbedded
# This is an additional magic that is exposed in embedded shells.
@magics_class
| KillEmbedded |
python | Textualize__textual | src/textual/demo/widgets.py | {
"start": 1027,
"end": 2884
} | class ____(containers.VerticalGroup):
"""Buttons demo."""
ALLOW_MAXIMIZE = True
DEFAULT_CLASSES = "column"
DEFAULT_CSS = """
Buttons {
ItemGrid { margin-bottom: 1;}
Button { width: 1fr; }
}
"""
BUTTONS_MD = """\
## Buttons
A simple button, with a number of semantic styles.
May be rendered unclickable by setting `disabled=True`.
Press `return` to active a button when focused (or click it).
"""
def compose(self) -> ComposeResult:
yield Markdown(self.BUTTONS_MD)
with containers.ItemGrid(min_column_width=20, regular=True):
yield Button(
"Default",
tooltip="The default button style",
action="notify('you pressed Default')",
)
yield Button(
"Primary",
variant="primary",
tooltip="The primary button style - carry out the core action of the dialog",
action="notify('you pressed Primary')",
)
yield Button(
"Warning",
variant="warning",
tooltip="The warning button style - warn the user that this isn't a typical button",
action="notify('you pressed Warning')",
)
yield Button(
"Error",
variant="error",
tooltip="The error button style - clicking is a destructive action",
action="notify('you pressed Error')",
)
with containers.ItemGrid(min_column_width=20, regular=True):
yield Button("Default", disabled=True)
yield Button("Primary", variant="primary", disabled=True)
yield Button("Warning", variant="warning", disabled=True)
yield Button("Error", variant="error", disabled=True)
| Buttons |
python | pypa__hatch | tests/backend/builders/test_wheel.py | {
"start": 22601,
"end": 24284
} | class ____:
def test_default(self, isolation):
builder = WheelBuilder(str(isolation))
assert builder.config.strict_naming is builder.config.strict_naming is True
def test_target(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"strict-naming": False}}}}}}
builder = WheelBuilder(str(isolation), config=config)
assert builder.config.strict_naming is False
def test_target_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"strict-naming": 9000}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.wheel.strict-naming` must be a boolean"):
_ = builder.config.strict_naming
def test_global(self, isolation):
config = {"tool": {"hatch": {"build": {"strict-naming": False}}}}
builder = WheelBuilder(str(isolation), config=config)
assert builder.config.strict_naming is False
def test_global_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"strict-naming": 9000}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.strict-naming` must be a boolean"):
_ = builder.config.strict_naming
def test_target_overrides_global(self, isolation):
config = {"tool": {"hatch": {"build": {"strict-naming": False, "targets": {"wheel": {"strict-naming": True}}}}}}
builder = WheelBuilder(str(isolation), config=config)
assert builder.config.strict_naming is True
| TestStrictNaming |
python | kamyu104__LeetCode-Solutions | Python/number-of-good-pairs.py | {
"start": 50,
"end": 266
} | class ____(object):
def numIdenticalPairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum(c*(c-1)//2 for c in collections.Counter(nums).itervalues())
| Solution |
python | django-guardian__django-guardian | example_project/core/migrations/0002_auto_20190629_0848.py | {
"start": 130,
"end": 992
} | class ____(migrations.Migration):
dependencies = [
("core", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="customuser",
name="last_name",
field=models.CharField(blank=True, max_length=150, verbose_name="last name"),
),
migrations.AlterField(
model_name="customuser",
name="username",
field=models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name="username",
),
),
]
| Migration |
python | pytorch__pytorch | test/jit/test_dce.py | {
"start": 222,
"end": 2181
} | class ____(JitTestCase):
def test_setattr_no_aliasdb(self):
class Net(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.empty([2, 2])
def forward(self):
x = torch.rand([3, 3])
self.x = x
net = torch.jit.script(Net())
FileCheck().check("prim::SetAttr").run(net.graph)
def test_setattr_removed(self):
@torch.jit.script
class Thing1:
def __init__(self) -> None:
self.x = torch.zeros([2, 2])
make_global(Thing1)
class Thing2(torch.nn.Module):
def forward(self):
x = torch.rand([2, 2])
y = torch.rand([2, 2])
t1 = Thing1()
t1.x = x
return y
unscripted = Thing2()
t2 = torch.jit.script(unscripted)
t2.eval()
# freezing inlines t1.__init__(), after which DCE can occur.
t2 = torch.jit.freeze(t2)
FileCheck().check_not("prim::SetAttr").run(t2.graph)
def test_mutated_simple(self):
def fn(x: torch.Tensor):
y = x.sin()
y_slice = y[::2]
y_slice.add_(x[::2])
z = y.cos()
return z
fn_s = torch.jit.script(fn)
torch._C._jit_pass_dce_graph(fn_s.graph)
FileCheck().check("aten::add_").run(fn_s.graph)
def test_mutated_loop(self):
def fn(x: torch.Tensor):
y = x.sin()
y_slice = y[::2]
y_slice.add_(x[::2])
for _ in range(2):
y_slice = y[::2]
y = y.repeat(2)
z = y.cos()
return z
fn_s = torch.jit.script(fn)
torch._C._jit_pass_dce_graph(fn_s.graph)
FileCheck().check("aten::add_").run(fn_s.graph)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestDCE |
python | mlflow__mlflow | mlflow/store/model_registry/databricks_workspace_model_registry_rest_store.py | {
"start": 2634,
"end": 7111
} | class ____(RestStore):
def __init__(self, store_uri, tracking_uri):
super().__init__(get_host_creds=partial(get_databricks_host_creds, store_uri))
self.tracking_uri = tracking_uri
def set_registered_model_alias(self, name, alias, version):
_raise_unsupported_method(method="set_registered_model_alias")
def delete_registered_model_alias(self, name, alias):
_raise_unsupported_method(method="delete_registered_model_alias")
def get_model_version_by_alias(self, name, alias):
_raise_unsupported_method(
method="get_model_version_by_alias",
message="If attempting to load a model version by alias via a URI of the form "
"'models:/model_name@alias_name', configure the MLflow client to target Unity Catalog "
"and try again.",
)
def _await_model_version_creation(self, mv, await_creation_for):
uc_hint = (
" For faster model version creation, use Models in Unity Catalog "
"(https://docs.databricks.com/en/machine-learning/manage-model-lifecycle/index.html)."
)
self._await_model_version_creation_impl(mv, await_creation_for, hint=uc_hint)
def copy_model_version(self, src_mv, dst_name):
"""
Copy a model version from one registered model to another as a new model version.
This method can be used within the Databricks workspace registry to copy model versions
between registered models, or to migrate model versions from the Databricks workspace
registry to Unity Catalog. During the migration, signature validation can be bypassed
by setting the `MLFLOW_SKIP_SIGNATURE_CHECK_FOR_UC_REGISTRY_MIGRATION`environment variable
to `True`.
Args:
src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing
the source model version.
dst_name: The name of the registered model to copy the model version to. If a
registered model with this name does not exist, it will be created.
Returns:
Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing
the cloned model version.
"""
if dst_name.count(".") == 2:
source_uri = f"models:/{src_mv.name}/{src_mv.version}"
try:
local_model_dir = mlflow.artifacts.download_artifacts(
artifact_uri=source_uri,
tracking_uri=self.tracking_uri,
registry_uri="databricks",
)
except Exception as e:
raise MlflowException(
f"Unable to download model {src_mv.name} version {src_mv.version} "
f"artifacts from Databricks workspace registry in order to migrate "
f"them to Unity Catalog. Please ensure the model version artifacts "
f"exist and that you can download them via "
f"mlflow.artifacts.download_artifacts()"
) from e
uc_store = UcModelRegistryStore(
store_uri=_DATABRICKS_UNITY_CATALOG_SCHEME,
tracking_uri=self.tracking_uri,
)
try:
create_model_response = uc_store.create_registered_model(dst_name)
eprint(f"Successfully registered model '{create_model_response.name}'.")
except MlflowException as e:
if e.error_code != ErrorCode.Name(RESOURCE_ALREADY_EXISTS):
raise
eprint(
f"Registered model '{dst_name}' already exists."
f" Creating a new version of this model..."
)
skip_signature = MLFLOW_SKIP_SIGNATURE_CHECK_FOR_UC_REGISTRY_MIGRATION.get()
source_workspace_id = _extract_workspace_id_from_run_link(src_mv.run_link)
return uc_store._create_model_version_with_optional_signature_validation(
name=dst_name,
source=source_uri,
run_id=src_mv.run_id,
local_model_path=local_model_dir,
model_id=src_mv.model_id,
bypass_signature_validation=skip_signature,
source_workspace_id=source_workspace_id,
)
else:
return super().copy_model_version(src_mv, dst_name)
| DatabricksWorkspaceModelRegistryRestStore |
python | scrapy__scrapy | tests/test_commands.py | {
"start": 704,
"end": 2037
} | class ____:
def setup_method(self):
self.command = EmptyCommand()
self.command.settings = Settings()
self.parser = argparse.ArgumentParser(
formatter_class=ScrapyHelpFormatter, conflict_handler="resolve"
)
self.command.add_options(self.parser)
def test_settings_json_string(self):
feeds_json = '{"data.json": {"format": "json"}, "data.xml": {"format": "xml"}}'
opts, args = self.parser.parse_known_args(
args=["-s", f"FEEDS={feeds_json}", "spider.py"]
)
self.command.process_options(args, opts)
assert isinstance(self.command.settings["FEEDS"], scrapy.settings.BaseSettings)
assert dict(self.command.settings["FEEDS"]) == json.loads(feeds_json)
def test_help_formatter(self):
formatter = ScrapyHelpFormatter(prog="scrapy")
part_strings = [
"usage: scrapy genspider [options] <name> <domain>\n\n",
"\n",
"optional arguments:\n",
"\n",
"Global Options:\n",
]
assert formatter._join_parts(part_strings) == (
"Usage\n=====\n scrapy genspider [options] <name> <domain>\n\n\n"
"Optional Arguments\n==================\n\n"
"Global Options\n--------------\n"
)
| TestCommandSettings |
python | falconry__falcon | falcon/errors.py | {
"start": 4054,
"end": 4713
} | class ____(ConnectionError):
"""The websocket connection is lost.
This error is raised when attempting to perform an operation on the
WebSocket and it is determined that either the client has closed the
connection, the server closed the connection, or the socket has otherwise
been lost.
Keyword Args:
code (int): The WebSocket close code, as per the WebSocket spec
(default ``1000``).
"""
code: int
"""The WebSocket close code, as per the WebSocket spec."""
def __init__(self, code: int | None = None) -> None:
self.code = code or 1000 # Default to "Normal Closure"
| WebSocketDisconnected |
python | huggingface__transformers | src/transformers/models/granitemoeshared/modeling_granitemoeshared.py | {
"start": 30466,
"end": 35211
} | class ____(GraniteMoeSharedPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config: GraniteMoeSharedConfig):
super().__init__(config)
self.model = GraniteMoeSharedModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_local_experts
self.num_experts_per_tok = config.num_experts_per_tok
self.logits_scaling = config.logits_scaling
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, MoeCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, GraniteMoeSharedForCausalLM
>>> model = GraniteMoeSharedForCausalLM.from_pretrained("ibm/PowerMoE-3b")
>>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
# Only compute necessary logits
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
logits = logits / self.config.logits_scaling
loss = None
if labels is not None:
# Upcast to float if we need to compute the loss to avoid potential precision issues
logits = logits.float()
# Flatten the tokens
loss = self.loss_function(
logits,
labels,
vocab_size=self.config.vocab_size,
**kwargs,
)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
__all__ = ["GraniteMoeSharedForCausalLM", "GraniteMoeSharedModel", "GraniteMoeSharedPreTrainedModel"]
| GraniteMoeSharedForCausalLM |
python | aimacode__aima-python | probability4e.py | {
"start": 13823,
"end": 18975
} | class ____:
""" A Bayesian network node with continuous distribution or with continuous distributed parents """
def __init__(self, name, d_parents, c_parents, parameters, type):
"""
A continuous Bayesian node has two types of parents: discrete and continuous.
:param d_parents: str, name of discrete parents, value of which determines distribution parameters
:param c_parents: str, name of continuous parents, value of which is used to calculate distribution
:param parameters: a dict, parameters for distribution of current node, keys corresponds to discrete parents
:param type: str, type of current node's value, either 'd' (discrete) or 'c'(continuous)
"""
self.parameters = parameters
self.type = type
self.d_parents = d_parents.split()
self.c_parents = c_parents.split()
self.parents = self.d_parents + self.c_parents
self.variable = name
self.children = []
def continuous_p(self, value, c_event, d_event):
"""
Probability given the value of current node and its parents
:param c_event: event of continuous nodes
:param d_event: event of discrete nodes
"""
assert isinstance(c_event, dict)
assert isinstance(d_event, dict)
d_event_vals = event_values(d_event, self.d_parents)
if len(d_event_vals) == 1:
d_event_vals = d_event_vals[0]
param = self.parameters[d_event_vals]
if self.type == "c":
p = gaussian_probability(param, c_event, value)
if self.type == "d":
p = logistic_probability(param, c_event, value)
return p
# harvest-buy example. Figure 13.5
harvest_buy = BayesNet([
('Subsidy', '', 0.001),
('Harvest', '', 0.002),
('Cost', 'Subsidy', 'Harvest',
{True: {'sigma': 0.5, 'b': 1, 'a': {'Harvest': 0.5}},
False: {'sigma': 0.6, 'b': 1, 'a': {'Harvest': 0.5}}}, 'c'),
('Buys', '', 'Cost', {T: {'mu': 0.5, 'sigma': 0.5}, F: {'mu': 0.6, 'sigma': 0.6}}, 'd')])
# ______________________________________________________________________________
# 13.3 Exact Inference in Bayesian Networks
# 13.3.1 Inference by enumeration
def enumeration_ask(X, e, bn):
"""
Return the conditional probability distribution of variable X
given evidence e, from BayesNet bn. [Figure 13.10]
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'
"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X)
for xi in bn.variable_values(X):
Q[xi] = enumerate_all(bn.variables, extend(e, X, xi), bn)
return Q.normalize()
def enumerate_all(variables, e, bn):
"""
Return the sum of those entries in P(variables | e{others})
consistent with e, where P is the joint distribution represented
by bn, and e{others} means e restricted to bn's other variables
(the ones other than variables). Parents must precede children in variables.
"""
if not variables:
return 1.0
Y, rest = variables[0], variables[1:]
Ynode = bn.variable_node(Y)
if Y in e:
return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)
else:
return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)
for y in bn.variable_values(Y))
# ______________________________________________________________________________
# 13.3.2 The variable elimination algorithm
def elimination_ask(X, e, bn):
"""
Compute bn's P(X|e) by variable elimination. [Figure 13.12]
>>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'
"""
assert X not in e, "Query variable must be distinct from evidence"
factors = []
for var in reversed(bn.variables):
factors.append(make_factor(var, e, bn))
if is_hidden(var, X, e):
factors = sum_out(var, factors, bn)
return pointwise_product(factors, bn).normalize()
def is_hidden(var, X, e):
"""Is var a hidden variable when querying P(X|e)?"""
return var != X and var not in e
def make_factor(var, e, bn):
"""
Return the factor for var in bn's joint distribution given e.
That is, bn's full joint distribution, projected to accord with e,
is the pointwise product of these factors for bn's variables.
"""
node = bn.variable_node(var)
variables = [X for X in [var] + node.parents if X not in e]
cpt = {event_values(e1, variables): node.p(e1[var], e1)
for e1 in all_events(variables, bn, e)}
return Factor(variables, cpt)
def pointwise_product(factors, bn):
return reduce(lambda f, g: f.pointwise_product(g, bn), factors)
def sum_out(var, factors, bn):
"""Eliminate var from all factors by summing over its values."""
result, var_factors = [], []
for f in factors:
(var_factors if var in f.variables else result).append(f)
result.append(pointwise_product(var_factors, bn).sum_out(var, bn))
return result
| ContinuousBayesNode |
python | numba__numba | numba/cpython/setobj.py | {
"start": 41796,
"end": 57246
} | class ____(object):
def __init__(self, context, builder, iter_type, iter_val):
self._context = context
self._builder = builder
self._ty = iter_type
self._iter = context.make_helper(builder, iter_type, iter_val)
ptr = self._context.nrt.meminfo_data(builder, self.meminfo)
self._payload = _SetPayload(context, builder, self._ty.container, ptr)
@classmethod
def from_set(cls, context, builder, iter_type, set_val):
set_inst = SetInstance(context, builder, iter_type.container, set_val)
self = cls(context, builder, iter_type, None)
index = context.get_constant(types.intp, 0)
self._iter.index = cgutils.alloca_once_value(builder, index)
self._iter.meminfo = set_inst.meminfo
return self
@property
def value(self):
return self._iter._getvalue()
@property
def meminfo(self):
return self._iter.meminfo
@property
def index(self):
return self._builder.load(self._iter.index)
@index.setter
def index(self, value):
self._builder.store(value, self._iter.index)
def iternext(self, result):
index = self.index
payload = self._payload
one = ir.Constant(index.type, 1)
result.set_exhausted()
with payload._iterate(start=index) as loop:
# An entry was found
entry = loop.entry
result.set_valid()
result.yield_(entry.key)
self.index = self._builder.add(loop.index, one)
loop.do_break()
#-------------------------------------------------------------------------------
# Constructors
def build_set(context, builder, set_type, items):
"""
Build a set of the given type, containing the given items.
"""
nitems = len(items)
inst = SetInstance.allocate(context, builder, set_type, nitems)
if nitems > 0:
# Populate set. Inlining the insertion code for each item would be very
# costly, instead we create a LLVM array and iterate over it.
array = cgutils.pack_array(builder, items)
array_ptr = cgutils.alloca_once_value(builder, array)
count = context.get_constant(types.intp, nitems)
with cgutils.for_range(builder, count) as loop:
item = builder.load(cgutils.gep(builder, array_ptr, 0, loop.index))
inst.add(item)
return impl_ret_new_ref(context, builder, set_type, inst.value)
@lower_builtin(set)
def set_empty_constructor(context, builder, sig, args):
set_type = sig.return_type
inst = SetInstance.allocate(context, builder, set_type)
return impl_ret_new_ref(context, builder, set_type, inst.value)
@lower_builtin(set, types.IterableType)
def set_constructor(context, builder, sig, args):
set_type = sig.return_type
items_type, = sig.args
items, = args
# If the argument has a len(), preallocate the set so as to
# avoid resizes.
# `for_iter` increfs each item in the set, so a `decref` is required each
# iteration to balance. Because the `incref` from `.add` is dependent on
# the item not already existing in the set, just removing its incref is not
# enough to guarantee all memory is freed
n = call_len(context, builder, items_type, items)
inst = SetInstance.allocate(context, builder, set_type, n)
with for_iter(context, builder, items_type, items) as loop:
inst.add(loop.value)
context.nrt.decref(builder, set_type.dtype, loop.value)
return impl_ret_new_ref(context, builder, set_type, inst.value)
#-------------------------------------------------------------------------------
# Various operations
@lower_builtin(len, types.Set)
def set_len(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
return inst.get_size()
@lower_builtin(operator.contains, types.Set, types.Any)
def in_set(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
return inst.contains(args[1])
@lower_builtin('getiter', types.Set)
def getiter_set(context, builder, sig, args):
inst = SetIterInstance.from_set(context, builder, sig.return_type, args[0])
return impl_ret_borrowed(context, builder, sig.return_type, inst.value)
@lower_builtin('iternext', types.SetIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
inst = SetIterInstance(context, builder, sig.args[0], args[0])
inst.iternext(result)
#-------------------------------------------------------------------------------
# Methods
# One-item-at-a-time operations
@lower_builtin("set.add", types.Set, types.Any)
def set_add(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
item = args[1]
inst.add(item)
return context.get_dummy_value()
@intrinsic
def _set_discard(typingctx, s, item):
sig = types.none(s, item)
def set_discard(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
item = args[1]
inst.discard(item)
return context.get_dummy_value()
return sig, set_discard
@overload_method(types.Set, "discard")
def ol_set_discard(s, item):
return lambda s, item: _set_discard(s, item)
@intrinsic
def _set_pop(typingctx, s):
sig = s.dtype(s)
def set_pop(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
used = inst.payload.used
with builder.if_then(cgutils.is_null(builder, used), likely=False):
context.call_conv.return_user_exc(builder, KeyError,
("set.pop(): empty set",))
return inst.pop()
return sig, set_pop
@overload_method(types.Set, "pop")
def ol_set_pop(s):
return lambda s: _set_pop(s)
@intrinsic
def _set_remove(typingctx, s, item):
sig = types.none(s, item)
def set_remove(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
item = args[1]
found = inst.discard(item)
with builder.if_then(builder.not_(found), likely=False):
context.call_conv.return_user_exc(builder, KeyError,
("set.remove(): key not in set",))
return context.get_dummy_value()
return sig, set_remove
@overload_method(types.Set, "remove")
def ol_set_remove(s, item):
if s.dtype == item:
return lambda s, item: _set_remove(s, item)
# Mutating set operations
@intrinsic
def _set_clear(typingctx, s):
sig = types.none(s)
def set_clear(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
inst.clear()
return context.get_dummy_value()
return sig, set_clear
@overload_method(types.Set, "clear")
def ol_set_clear(s):
return lambda s: _set_clear(s)
@intrinsic
def _set_copy(typingctx, s):
sig = s(s)
def set_copy(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = inst.copy()
return impl_ret_new_ref(context, builder, sig.return_type, other.value)
return sig, set_copy
@overload_method(types.Set, "copy")
def ol_set_copy(s):
return lambda s: _set_copy(s)
def set_difference_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
inst.difference(other)
return context.get_dummy_value()
@intrinsic
def _set_difference_update(typingctx, a, b):
sig = types.none(a, b)
return sig, set_difference_update
@overload_method(types.Set, "difference_update")
def set_difference_update_impl(a, b):
check_all_set(a, b)
return lambda a, b: _set_difference_update(a, b)
def set_intersection_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
inst.intersect(other)
return context.get_dummy_value()
@intrinsic
def _set_intersection_update(typingctx, a, b):
sig = types.none(a, b)
return sig, set_intersection_update
@overload_method(types.Set, "intersection_update")
def set_intersection_update_impl(a, b):
check_all_set(a, b)
return lambda a, b: _set_intersection_update(a, b)
def set_symmetric_difference_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
inst.symmetric_difference(other)
return context.get_dummy_value()
@intrinsic
def _set_symmetric_difference_update(typingctx, a, b):
sig = types.none(a, b)
return sig, set_symmetric_difference_update
@overload_method(types.Set, "symmetric_difference_update")
def set_symmetric_difference_update_impl(a, b):
check_all_set(a, b)
return lambda a, b: _set_symmetric_difference_update(a, b)
@lower_builtin("set.update", types.Set, types.IterableType)
def set_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
items_type = sig.args[1]
items = args[1]
# If the argument has a len(), assume there are few collisions and
# presize to len(set) + len(items)
n = call_len(context, builder, items_type, items)
if n is not None:
new_size = builder.add(inst.payload.used, n)
inst.upsize(new_size)
with for_iter(context, builder, items_type, items) as loop:
# make sure that the items being added are of the same dtype as the
# set instance
casted = context.cast(builder, loop.value, items_type.dtype, inst.dtype)
inst.add(casted)
# decref each item to counter balance the incref from `for_iter`
# `.add` will conditionally incref when the item does not already exist
# in the set, therefore removing its incref is not enough to guarantee
# all memory is freed
context.nrt.decref(builder, items_type.dtype, loop.value)
if n is not None:
# If we pre-grew the set, downsize in case there were many collisions
inst.downsize(inst.payload.used)
return context.get_dummy_value()
def gen_operator_impl(op, impl):
@intrinsic
def _set_operator_intr(typingctx, a, b):
sig = a(a, b)
def codegen(context, builder, sig, args):
assert sig.return_type == sig.args[0]
impl(context, builder, sig, args)
return impl_ret_borrowed(context, builder, sig.args[0], args[0])
return sig, codegen
@overload(op)
def _ol_set_operator(a, b):
check_all_set(a, b)
return lambda a, b: _set_operator_intr(a, b)
for op_, op_impl in [
(operator.iand, set_intersection_update),
(operator.ior, set_update),
(operator.isub, set_difference_update),
(operator.ixor, set_symmetric_difference_update),
]:
gen_operator_impl(op_, op_impl)
# Set operations creating a new set
@overload(operator.sub)
@overload_method(types.Set, "difference")
def impl_set_difference(a, b):
check_all_set(a, b)
def difference_impl(a, b):
s = a.copy()
s.difference_update(b)
return s
return difference_impl
@overload(operator.and_)
@overload_method(types.Set, "intersection")
def set_intersection(a, b):
check_all_set(a, b)
def intersection_impl(a, b):
if len(a) < len(b):
s = a.copy()
s.intersection_update(b)
return s
else:
s = b.copy()
s.intersection_update(a)
return s
return intersection_impl
@overload(operator.xor)
@overload_method(types.Set, "symmetric_difference")
def set_symmetric_difference(a, b):
check_all_set(a, b)
def symmetric_difference_impl(a, b):
if len(a) > len(b):
s = a.copy()
s.symmetric_difference_update(b)
return s
else:
s = b.copy()
s.symmetric_difference_update(a)
return s
return symmetric_difference_impl
@overload(operator.or_)
@overload_method(types.Set, "union")
def set_union(a, b):
check_all_set(a, b)
def union_impl(a, b):
if len(a) > len(b):
s = a.copy()
s.update(b)
return s
else:
s = b.copy()
s.update(a)
return s
return union_impl
# Predicates
@intrinsic
def _set_isdisjoint(typingctx, a, b):
sig = types.boolean(a, b)
def codegen(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
return inst.isdisjoint(other)
return sig, codegen
@overload_method(types.Set, "isdisjoint")
def set_isdisjoint(a, b):
check_all_set(a, b)
return lambda a, b: _set_isdisjoint(a, b)
@intrinsic
def _set_issubset(typingctx, a, b):
sig = types.boolean(a, b)
def codegen(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
return inst.issubset(other)
return sig, codegen
@overload(operator.le)
@overload_method(types.Set, "issubset")
def set_issubset(a, b):
check_all_set(a, b)
return lambda a, b: _set_issubset(a, b)
@overload(operator.ge)
@overload_method(types.Set, "issuperset")
def set_issuperset(a, b):
check_all_set(a, b)
def superset_impl(a, b):
return b.issubset(a)
return superset_impl
@intrinsic
def _set_eq(typingctx, a, b):
sig = types.boolean(a, b)
def codegen(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
return inst.equals(other)
return sig, codegen
@overload(operator.eq)
def set_eq(a, b):
check_all_set(a, b)
return lambda a, b: _set_eq(a, b)
@overload(operator.ne)
def set_ne(a, b):
check_all_set(a, b)
def ne_impl(a, b):
return not a == b
return ne_impl
@intrinsic
def _set_lt(typingctx, a, b):
sig = types.boolean(a, b)
def codegen(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
return inst.issubset(other, strict=True)
return sig, codegen
@overload(operator.lt)
def set_lt(a, b):
check_all_set(a, b)
return lambda a, b: _set_lt(a, b)
@overload(operator.gt)
def set_gt(a, b):
check_all_set(a, b)
def gt_impl(a, b):
return b < a
return gt_impl
@lower_builtin(operator.is_, types.Set, types.Set)
def set_is(context, builder, sig, args):
a = SetInstance(context, builder, sig.args[0], args[0])
b = SetInstance(context, builder, sig.args[1], args[1])
ma = builder.ptrtoint(a.meminfo, cgutils.intp_t)
mb = builder.ptrtoint(b.meminfo, cgutils.intp_t)
return builder.icmp_signed('==', ma, mb)
# -----------------------------------------------------------------------------
# Implicit casting
@lower_cast(types.Set, types.Set)
def set_to_set(context, builder, fromty, toty, val):
# Casting from non-reflected to reflected
assert fromty.dtype == toty.dtype
return val
| SetIterInstance |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/concrete_function.py | {
"start": 75037,
"end": 75575
} | class ____:
"""Cleans up reference cycles when a `ConcreteFunction` goes out of scope."""
__slots__ = ["_func_graph"]
def __init__(self, func_graph):
self._func_graph = func_graph
def release(self):
"""Call off the FuncGraph deletion."""
self._func_graph = None
def __del__(self):
if func_graph_module is None or self._func_graph is None:
return
try:
func_graph_module.dismantle_func_graph(self._func_graph)
except: # pylint: disable=bare-except
pass
| ConcreteFunctionGarbageCollector |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 46082,
"end": 47872
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 4]"):
l_x_ = L_x_
add_: "f32[3, 4]" = l_x_.add_(1.0)
relu_: "f32[3, 4]" = torch.relu_(l_x_); l_x_ = None
add: "f32[3, 4]" = add_ + relu_; add_ = relu_ = None
return (add,)
""",
)
self.assertTrue(torch._is_functional_tensor(backend.example_inputs[1][0]))
self.assertEqual(f_out, ff_out)
self.assertEqual(f_out, aot_ff_out)
try:
torch._enable_functionalization(reapply_views=False)
xf = pytree.tree_map(to_fun, x)
x_view = xf.t()
with self.assertRaisesRegex(RuntimeError, "Cannot safely fakify a view"):
f(x_view)
finally:
torch._disable_functionalization()
def test_compile_higher_order_with_functionalization(self):
backend = EagerRecordGraphAndInputs()
cnt = torch._dynamo.testing.CompileCounterWithBackend(backend)
@torch.compile(backend=cnt, fullgraph=True)
def f(x):
return wrap(lambda x: x.add_(1.0), x)
def check_count_and_graph(
exp_frame_count, exp_op_count, exp_n_graph, exp_graph
):
self.assertEqual(cnt.frame_count, exp_frame_count)
self.assertEqual(cnt.op_count, exp_op_count)
self.assertEqual(len(backend.graphs), exp_n_graph)
actual = normalize_gm(
backend.graphs[exp_n_graph - 1].print_readable(print_output=False)
)
self.assertExpectedInline(actual, exp_graph, skip=1)
t = torch.randn([3, 4])
t_clone = t.clone()
t_clone2 = t.clone()
f(t)
check_count_and_graph(
1,
2,
1,
"""\
| GraphModule |
python | walkccc__LeetCode | solutions/1157. Online Majority Element In Subarray/1157.py | {
"start": 0,
"end": 617
} | class ____:
def __init__(self, arr: list[int]):
self.arr = arr
self.TIMES = 20 # 2^TIMES >> |arr|
self.numToIndices = collections.defaultdict(list)
for i, a in enumerate(self.arr):
self.numToIndices[a].append(i)
def query(self, left: int, right: int, threshold: int) -> int:
for _ in range(self.TIMES):
randIndex = random.randint(left, right)
num = self.arr[randIndex]
indices = self.numToIndices[num]
l = bisect.bisect_left(indices, left)
r = bisect.bisect_right(indices, right)
if r - l >= threshold:
return num
return -1
| MajorityChecker |
python | Textualize__textual | src/textual/style.py | {
"start": 1666,
"end": 15922
} | class ____:
"""Represents a style in the Visual interface (color and other attributes).
Styles may be added together, which combines their style attributes.
"""
background: Color | None = None
foreground: Color | None = None
bold: bool | None = None
dim: bool | None = None
italic: bool | None = None
underline: bool | None = None
underline2: bool | None = None
reverse: bool | None = None
strike: bool | None = None
blink: bool | None = None
link: str | None = None
_meta: bytes | None = None
auto_color: bool = False
def __rich_repr__(self) -> rich.repr.Result:
yield "background", self.background, None
yield "foreground", self.foreground, None
yield "bold", self.bold, None
yield "dim", self.dim, None
yield "italic", self.italic, None
yield "underline", self.underline, None
yield "underline2", self.underline2, None
yield "reverse", self.reverse, None
yield "strike", self.strike, None
yield "blink", self.blink, None
yield "link", self.link, None
if self._meta is not None:
yield "meta", self.meta
@cached_property
def _is_null(self) -> bool:
return _get_simple_attributes(self) == (
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
@cached_property
def hash(self) -> int:
"""A hash of the style's attributes."""
return hash(_get_hash_attributes(self))
def __hash__(self) -> int:
return self.hash
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Style):
return NotImplemented
return self.hash == other.hash
def __bool__(self) -> bool:
return not self._is_null
def __str__(self) -> str:
return self.style_definition
@cached_property
def style_definition(self) -> str:
"""Style encoded in a string (may be parsed from `Style.parse`)."""
output: list[str] = []
output_append = output.append
if self.foreground is not None:
output_append(self.foreground.css)
if self.background is not None:
output_append(f"on {self.background.css}")
if self.bold is not None:
output_append("bold" if self.bold else "not bold")
if self.dim is not None:
output_append("dim" if self.dim else "not dim")
if self.italic is not None:
output_append("italic" if self.italic else "not italic")
if self.underline is not None:
output_append("underline" if self.underline else "not underline")
if self.underline2 is not None:
output_append("underline2" if self.underline2 else "not underline2")
if self.strike is not None:
output_append("strike" if self.strike else "not strike")
if self.blink is not None:
output_append("blink" if self.blink else "not blink")
if self.link is not None:
if "'" not in self.link:
output_append(f"link='{self.link}'")
elif '"' not in self.link:
output_append(f'link="{self.link}"')
if self._meta is not None:
for key, value in self.meta.items():
if isinstance(value, str):
if "'" not in key:
output_append(f"{key}='{value}'")
elif '"' not in key:
output_append(f'{key}="{value}"')
else:
output_append(f"{key}={value!r}")
else:
output_append(f"{key}={value!r}")
return " ".join(output)
@cached_property
def markup_tag(self) -> str:
"""Identifier used to close tags in markup."""
output: list[str] = []
output_append = output.append
if self.foreground is not None:
output_append(self.foreground.css)
if self.background is not None:
output_append(f"on {self.background.css}")
if self.bold is not None:
output_append("bold" if self.bold else "not bold")
if self.dim is not None:
output_append("dim" if self.dim else "not dim")
if self.italic is not None:
output_append("italic" if self.italic else "not italic")
if self.underline is not None:
output_append("underline" if self.underline else "not underline")
if self.underline2 is not None:
output_append("underline2" if self.underline2 else "not underline2")
if self.strike is not None:
output_append("strike" if self.strike else "not strike")
if self.blink is not None:
output_append("blink" if self.blink else "not blink")
if self.link is not None:
output_append("link")
if self._meta is not None:
for key, value in self.meta.items():
if isinstance(value, str):
output_append(f"{key}=")
return " ".join(output)
@lru_cache(maxsize=1024 * 4)
def __add__(self, other: object | None) -> Style:
if isinstance(other, Style):
if self._is_null:
return other
if other._is_null:
return self
(
background,
foreground,
bold,
dim,
italic,
underline,
underline2,
reverse,
strike,
blink,
link,
meta,
_meta,
) = _get_attributes(self)
(
other_background,
other_foreground,
other_bold,
other_dim,
other_italic,
other_underline,
other_underline2,
other_reverse,
other_strike,
other_blink,
other_link,
other_meta,
other__meta,
) = _get_attributes(other)
new_style = Style(
(
other_background
if (background is None or background.a == 0)
else background + other_background
),
(
foreground
if (other_foreground is None or other_foreground.a == 0)
else other_foreground
),
bold if other_bold is None else other_bold,
dim if other_dim is None else other_dim,
italic if other_italic is None else other_italic,
underline if other_underline is None else other_underline,
underline2 if other_underline2 is None else other_underline2,
reverse if other_reverse is None else other_reverse,
strike if other_strike is None else other_strike,
blink if other_blink is None else other_blink,
link if other_link is None else other_link,
(
dumps({**meta, **other_meta})
if _meta is not None and other__meta is not None
else (_meta if other__meta is None else other__meta)
),
)
return new_style
elif other is None:
return self
else:
return NotImplemented
__radd__ = __add__
@classmethod
def null(cls) -> Style:
"""Get a null (no color or style) style."""
return NULL_STYLE
@classmethod
def parse(cls, text_style: str, variables: dict[str, str] | None = None) -> Style:
"""Parse a style from text.
Args:
text_style: A style encoded in a string.
variables: Optional mapping of CSS variables. `None` to get variables from the app.
Returns:
New style.
"""
from textual.markup import parse_style
try:
app = active_app.get()
except LookupError:
return parse_style(text_style, variables)
return app.stylesheet.parse_style(text_style)
@classmethod
def _normalize_markup_tag(cls, text_style: str) -> str:
"""Produces a normalized from of a style, used to match closing tags with opening tags.
Args:
text_style: Style to normalize.
Returns:
Normalized markup tag.
"""
try:
style = cls.parse(text_style)
except Exception:
return text_style.strip()
return style.markup_tag
@classmethod
def from_rich_style(
cls, rich_style: RichStyle, theme: TerminalTheme | None = None
) -> Style:
"""Build a Style from a (Rich) Style.
Args:
rich_style: A Rich Style object.
theme: Optional Rich [terminal theme][rich.terminal_theme.TerminalTheme].
Returns:
New Style.
"""
return Style(
(
None
if rich_style.bgcolor is None
else Color.from_rich_color(rich_style.bgcolor, theme)
),
(
None
if rich_style.color is None
else Color.from_rich_color(rich_style.color, theme)
),
bold=rich_style.bold,
dim=rich_style.dim,
italic=rich_style.italic,
underline=rich_style.underline,
underline2=rich_style.underline2,
reverse=rich_style.reverse,
strike=rich_style.strike,
blink=rich_style.blink,
link=rich_style.link,
_meta=rich_style._meta,
)
@classmethod
def from_styles(cls, styles: StylesBase) -> Style:
"""Create a Visual Style from a Textual styles object.
Args:
styles: A Styles object, such as `my_widget.styles`.
"""
text_style = styles.text_style
return Style(
styles.background,
(
Color(0, 0, 0, styles.color.a, auto=True)
if styles.auto_color
else styles.color
),
bold=text_style.bold,
dim=text_style.italic,
italic=text_style.italic,
underline=text_style.underline,
underline2=text_style.underline2,
reverse=text_style.reverse,
strike=text_style.strike,
auto_color=styles.auto_color,
)
@classmethod
def from_meta(cls, meta: Mapping[str, Any]) -> Style:
"""Create a Visual Style containing meta information.
Args:
meta: A dictionary of meta information.
Returns:
A new Style.
"""
return Style(_meta=dumps({**meta}))
@cached_property
def rich_style(self) -> RichStyle:
"""Convert this Styles into a Rich style.
Returns:
A Rich style object.
"""
(
background,
foreground,
bold,
dim,
italic,
underline,
underline2,
reverse,
strike,
blink,
link,
_meta,
) = _get_simple_attributes(self)
color = None if foreground is None else background + foreground
return RichStyle(
color=None if color is None else color.rich_color,
bgcolor=None if background is None else background.rich_color,
bold=bold,
dim=dim,
italic=italic,
underline=underline,
underline2=underline2,
reverse=reverse,
strike=strike,
blink=blink,
link=link,
meta=None if _meta is None else self.meta,
)
def rich_style_with_offset(self, x: int, y: int) -> RichStyle:
"""Get a Rich style with the given offset included in meta.
This is used in text selection.
Args:
x: X coordinate.
y: Y coordinate.
Returns:
A Rich Style object.
"""
(
background,
foreground,
bold,
dim,
italic,
underline,
underline2,
reverse,
strike,
blink,
link,
_meta,
) = _get_simple_attributes(self)
color = None if foreground is None else background + foreground
return RichStyle(
color=None if color is None else color.rich_color,
bgcolor=None if background is None else background.rich_color,
bold=bold,
dim=dim,
italic=italic,
underline=underline,
underline2=underline2,
reverse=reverse,
strike=strike,
blink=blink,
link=link,
meta={**self.meta, "offset": (x, y)},
)
@cached_property
def without_color(self) -> Style:
"""The style without any colors."""
return Style(None, None, *_get_simple_attributes_sans_color(self))
@cached_property
def background_style(self) -> Style:
"""Just the background color, with no other attributes."""
return Style(self.background, _meta=self._meta)
@property
def has_transparent_foreground(self) -> bool:
"""Is the foreground transparent (or not set)?"""
return self.foreground is None or self.foreground.a == 0
@classmethod
def combine(cls, styles: Iterable[Style]) -> Style:
"""Add a number of styles and get the result."""
iter_styles = iter(styles)
return sum(iter_styles, next(iter_styles))
@cached_property
def meta(self) -> Mapping[str, Any]:
"""Get meta information (can not be changed after construction)."""
return {} if self._meta is None else loads(self._meta)
NULL_STYLE = Style()
| Style |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/natural_language.py | {
"start": 1492,
"end": 4642
} | class ____(GoogleCloudBaseOperator):
"""
Finds named entities in the text along with various properties.
Examples properties: entity types, salience, mentions for each entity, and others.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitiesOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START natural_language_analyze_entities_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_analyze_entities_template_fields]
def __init__(
self,
*,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start analyzing entities")
response = hook.analyze_entities(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished analyzing entities")
return MessageToDict(response._pb)
| CloudNaturalLanguageAnalyzeEntitiesOperator |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-complete-all-deliveries.py | {
"start": 70,
"end": 838
} | class ____(object):
def minimumTime(self, d, r):
"""
:type d: List[int]
:type r: List[int]
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
def lcm(a, b):
return a//gcd(a,b)*b
def binary_search(left, right, check):
while left <= right:
mid = left+(right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
def check(t):
return t-t//r[0] >= d[0] and t-t//r[1] >= d[1] and t-t//l >= d[0]+d[1]
l = lcm(r[0], r[1])
return binary_search(sum(d), sum(d)*2, check)
| Solution |
python | pypa__pip | src/pip/_internal/utils/temp_dir.py | {
"start": 2130,
"end": 6597
class ____:
    """Helper class that owns and cleans up a temporary directory.

    This class can be used as a context manager or as an OO representation of a
    temporary directory.

    Attributes:
        path
            Location to the created temporary directory
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)

    Methods:
        cleanup()
            Deletes the temporary directory

    When used as a context manager, if the delete attribute is True, on
    exiting the context the temporary directory is deleted.
    """

    def __init__(
        self,
        path: str | None = None,
        delete: bool | None | _Default = _default,
        kind: str = "temp",
        globally_managed: bool = False,
        ignore_cleanup_errors: bool = True,
    ):
        super().__init__()

        if delete is _default:
            if path is not None:
                # If we were given an explicit directory, resolve delete option
                # now.
                delete = False
            else:
                # Otherwise, we wait until cleanup and see what
                # tempdir_registry says.
                delete = None

        # The only time we specify path is in for editables where it
        # is the value of the --src option.
        if path is None:
            path = self._create(kind)

        self._path = path
        # Guards against use-after-cleanup; see the `path` property.
        self._deleted = False
        self.delete = delete
        self.kind = kind
        self.ignore_cleanup_errors = ignore_cleanup_errors

        if globally_managed:
            assert _tempdir_manager is not None
            _tempdir_manager.enter_context(self)

    @property
    def path(self) -> str:
        assert not self._deleted, f"Attempted to access deleted path: {self._path}"
        return self._path

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.path!r}>"

    def __enter__(self: _T) -> _T:
        return self

    def __exit__(self, exc: Any, value: Any, tb: Any) -> None:
        # Resolve a still-undecided `delete` via the registry, defaulting to
        # deleting when no registry is active.
        if self.delete is not None:
            delete = self.delete
        elif _tempdir_registry:
            delete = _tempdir_registry.get_delete(self.kind)
        else:
            delete = True

        if delete:
            self.cleanup()

    def _create(self, kind: str) -> str:
        """Create a temporary directory and store its path in self.path"""
        # We realpath here because some systems have their default tmpdir
        # symlinked to another directory. This tends to confuse build
        # scripts, so we canonicalize the path by traversing potential
        # symlinks here.
        path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
        logger.debug("Created temporary directory: %s", path)
        return path

    def cleanup(self) -> None:
        """Remove the temporary directory created and reset state"""
        self._deleted = True
        if not os.path.exists(self._path):
            return

        errors: list[BaseException] = []

        def onerror(
            func: Callable[..., Any],
            path: Path,
            exc_val: BaseException,
        ) -> None:
            """Log a warning for a `rmtree` error and continue"""
            formatted_exc = "\n".join(
                traceback.format_exception_only(type(exc_val), exc_val)
            )
            formatted_exc = formatted_exc.rstrip()  # remove trailing new line
            if func in (os.unlink, os.remove, os.rmdir):
                logger.debug(
                    "Failed to remove a temporary file '%s' due to %s.\n",
                    path,
                    formatted_exc,
                )
            else:
                logger.debug("%s failed with %s.", func.__qualname__, formatted_exc)
            errors.append(exc_val)

        if self.ignore_cleanup_errors:
            try:
                # first try with @retry; retrying to handle ephemeral errors
                rmtree(self._path, ignore_errors=False)
            except OSError:
                # last pass ignore/log all errors
                rmtree(self._path, onexc=onerror)
            if errors:
                logger.warning(
                    "Failed to remove contents in a temporary directory '%s'.\n"
                    "You can safely remove it manually.",
                    self._path,
                )
        else:
            rmtree(self._path)
| TempDirectory |
python | pandas-dev__pandas | pandas/tests/indexing/test_chaining_and_caching.py | {
"start": 306,
"end": 2280
class ____:
    # Regression tests for item-cache consistency when setting values through
    # .loc and through chained indexing.

    @pytest.mark.parametrize("do_ref", [True, False])
    def test_setitem_cache_updating(self, do_ref):
        # GH 5424
        cont = ["one", "two", "three", "four", "five", "six", "seven"]

        df = DataFrame({"a": cont, "b": cont[3:] + cont[:3], "c": np.arange(7)})

        # ref the cache
        if do_ref:
            df.loc[0, "c"]

        # set it
        df.loc[7, "c"] = 1

        assert df.loc[0, "c"] == 0.0
        assert df.loc[7, "c"] == 1.0

    def test_setitem_cache_updating_slices(self):
        # GH 7084
        # not updating cache on series setting with slices
        expected = DataFrame(
            {"A": [600, 600, 600]}, index=date_range("5/7/2014", "5/9/2014")
        )
        out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
        df = DataFrame({"C": ["A", "A", "A"], "D": [100, 200, 300]})

        # loop through df to update out
        six = Timestamp("5/7/2014")
        eix = Timestamp("5/9/2014")
        for ix, row in df.iterrows():
            out.loc[six:eix, row["C"]] = out.loc[six:eix, row["C"]] + row["D"]

        tm.assert_frame_equal(out, expected)
        tm.assert_series_equal(out["A"], expected["A"])

        # try via a chain indexing
        # this actually works
        out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
        out_original = out.copy()
        for ix, row in df.iterrows():
            v = out[row["C"]][six:eix] + row["D"]
            # Chained assignment must raise/warn and leave `out` untouched.
            with tm.raises_chained_assignment_error():
                out[row["C"]][six:eix] = v
        tm.assert_frame_equal(out, out_original)
        tm.assert_series_equal(out["A"], out_original["A"])

        out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
        for ix, row in df.iterrows():
            out.loc[six:eix, row["C"]] += row["D"]
        tm.assert_frame_equal(out, expected)
        tm.assert_series_equal(out["A"], expected["A"])
| TestCaching |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 14929,
"end": 15492
class ____(_Scorer):
    """Mean-squared-error scorer that records itself and the routed metadata."""

    def __init__(self, registry=None):
        super().__init__(
            score_func=mean_squared_error, sign=1, kwargs={}, response_method="predict"
        )
        self.registry = registry

    def _score(self, method_caller, clf, X, y, **kwargs):
        # Track every scorer instance that actually gets invoked.
        if self.registry is not None:
            self.registry.append(self)

        # Record any non-default metadata routed to this call.
        record_metadata_not_default(self, **kwargs)

        sw = kwargs.get("sample_weight")
        return super()._score(method_caller, clf, X, y, sample_weight=sw)
| ConsumingScorer |
python | ray-project__ray | rllib/examples/rl_modules/classes/modelv2_to_rlm.py | {
"start": 958,
"end": 9244
class ____(TorchRLModule, ValueFunctionAPI):
    """An RLModule containing a (old stack) ModelV2.

    The `ModelV2` may be define either through
    - an existing Policy checkpoint
    - an existing Algorithm checkpoint (and a policy ID or "default_policy")
    - or through an AlgorithmConfig object

    The ModelV2 is created in the `setup` and contines to live through the lifetime
    of the RLModule.
    """

    @override(TorchRLModule)
    def setup(self):
        # Try extracting the policy ID from this RLModule's config dict.
        policy_id = self.model_config.get("policy_id", DEFAULT_POLICY_ID)

        # Try getting the algorithm checkpoint from the `model_config`.
        algo_checkpoint_dir = self.model_config.get("algo_checkpoint_dir")
        if algo_checkpoint_dir:
            algo_checkpoint_dir = pathlib.Path(algo_checkpoint_dir)
            if not algo_checkpoint_dir.is_dir():
                raise ValueError(
                    "The `model_config` of your RLModule must contain a "
                    "`algo_checkpoint_dir` key pointing to the algo checkpoint "
                    "directory! You can find this dir inside the results dir of your "
                    "experiment. You can then add this path "
                    "through `config.rl_module(model_config={"
                    "'algo_checkpoint_dir': [your algo checkpoint dir]})`."
                )
            policy_checkpoint_dir = algo_checkpoint_dir / "policies" / policy_id
        # Try getting the policy checkpoint from the `model_config`.
        else:
            policy_checkpoint_dir = self.model_config.get("policy_checkpoint_dir")

        # Create the ModelV2 from the Policy.
        if policy_checkpoint_dir:
            policy_checkpoint_dir = pathlib.Path(policy_checkpoint_dir)
            if not policy_checkpoint_dir.is_dir():
                raise ValueError(
                    "The `model_config` of your RLModule must contain a "
                    "`policy_checkpoint_dir` key pointing to the policy checkpoint "
                    "directory! You can find this dir under the Algorithm's checkpoint "
                    "dir in subdirectory: [algo checkpoint dir]/policies/[policy ID "
                    "ex. `default_policy`]. You can then add this path through `config"
                    ".rl_module(model_config={'policy_checkpoint_dir': "
                    "[your policy checkpoint dir]})`."
                )
            # Create a temporary policy object.
            policy = TorchPolicyV2.from_checkpoint(policy_checkpoint_dir)
        # Create the ModelV2 from scratch using the config.
        else:
            config = self.model_config.get("old_api_stack_algo_config")
            if not config:
                raise ValueError(
                    "The `model_config` of your RLModule must contain a "
                    "`algo_config` key with a AlgorithmConfig object in it that "
                    "contains all the settings that would be necessary to construct a "
                    "old API stack Algorithm/Policy/ModelV2! You can add this setting "
                    "through `config.rl_module(model_config={'algo_config': "
                    "[your old config]})`."
                )
            # Get the multi-agent policies dict.
            policy_dict, _ = config.get_multi_agent_setup(
                spaces={
                    policy_id: (self.observation_space, self.action_space),
                },
                default_policy_class=config.algo_class.get_default_policy_class(config),
            )
            config = config.to_dict()
            config["__policy_id"] = policy_id
            policy = policy_dict[policy_id].policy_class(
                self.observation_space,
                self.action_space,
                config,
            )

        self._model_v2 = policy.model

        # Translate the action dist classes from the old API stack to the new.
        self.action_dist_class = self._translate_dist_class(policy.dist_class)

        # Erase the torch policy from memory, so it can be garbage collected.
        del policy

    @override(TorchRLModule)
    def _forward_inference(self, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        return self._forward_pass(batch, inference=True)

    @override(TorchRLModule)
    def _forward_exploration(self, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        # Exploration behaves exactly like inference for this wrapper.
        return self._forward_inference(batch, **kwargs)

    @override(TorchRLModule)
    def _forward_train(self, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        out = self._forward_pass(batch, inference=False)
        out[Columns.ACTION_LOGP] = self.get_train_action_dist_cls()(
            out[Columns.ACTION_DIST_INPUTS]
        ).logp(batch[Columns.ACTIONS])
        out[Columns.VF_PREDS] = self._model_v2.value_function()
        if Columns.STATE_IN in batch and Columns.SEQ_LENS in batch:
            out[Columns.VF_PREDS] = torch.reshape(
                out[Columns.VF_PREDS], [len(batch[Columns.SEQ_LENS]), -1]
            )
        return out

    def _forward_pass(self, batch, inference=True):
        # Translate states and seq_lens into old API stack formats.
        batch = batch.copy()
        state_in = batch.pop(Columns.STATE_IN, {})
        state_in = [s for i, s in sorted(state_in.items())]
        seq_lens = batch.pop(Columns.SEQ_LENS, None)
        if state_in:
            if inference and seq_lens is None:
                seq_lens = torch.tensor(
                    [1.0] * state_in[0].shape[0], device=state_in[0].device
                )
            elif not inference:
                assert seq_lens is not None
            # Perform the actual ModelV2 forward pass.
            # A recurrent ModelV2 adds and removes the time-rank itself (whereas in the
            # new API stack, the connector pipelines are responsible for doing this) ->
            # We have to remove, then re-add the time rank here to make ModelV2 work.
            batch = tree.map_structure(
                lambda s: torch.reshape(s, [-1] + list(s.shape[2:])), batch
            )
        nn_output, state_out = self._model_v2(batch, state_in, seq_lens)
        # Put back 1ts time rank into nn-output (inference).
        if state_in:
            if inference:
                nn_output = tree.map_structure(
                    lambda s: torch.unsqueeze(s, axis=1), nn_output
                )
            else:
                nn_output = tree.map_structure(
                    lambda s: torch.reshape(s, [len(seq_lens), -1] + list(s.shape[1:])),
                    nn_output,
                )
        # Interpret the NN output as action logits.
        output = {Columns.ACTION_DIST_INPUTS: nn_output}
        # Add the `state_out` to the `output`, new API stack style.
        if state_out:
            output[Columns.STATE_OUT] = {}
            for i, o in enumerate(state_out):
                output[Columns.STATE_OUT][i] = o
        return output

    @override(ValueFunctionAPI)
    def compute_values(self, batch: Dict[str, Any], embeddings: Optional[Any] = None):
        # Run a train-style forward pass so the ModelV2 caches its value output.
        self._forward_pass(batch, inference=False)
        v_preds = self._model_v2.value_function()
        if Columns.STATE_IN in batch and Columns.SEQ_LENS in batch:
            v_preds = torch.reshape(v_preds, [len(batch[Columns.SEQ_LENS]), -1])
        return v_preds

    @override(TorchRLModule)
    def get_initial_state(self):
        """Converts the initial state list of ModelV2 into a dict (new API stack)."""
        init_state_list = self._model_v2.get_initial_state()
        return dict(enumerate(init_state_list))

    def _translate_dist_class(self, old_dist_class):
        # Map old-stack torch distribution classes to their new-stack versions.
        map_ = {
            OldTorchCategorical: TorchCategorical,
            OldTorchDiagGaussian: TorchDiagGaussian,
            OldTorchMultiActionDistribution: TorchMultiDistribution,
            OldTorchMultiCategorical: TorchMultiCategorical,
            OldTorchSquashedGaussian: TorchSquashedGaussian,
        }
        if old_dist_class not in map_:
            raise ValueError(
                f"ModelV2ToRLModule does NOT support {old_dist_class} action "
                f"distributions yet!"
            )
        return map_[old_dist_class]
| ModelV2ToRLModule |
python | kamyu104__LeetCode-Solutions | Python/check-if-all-the-integers-in-a-range-are-covered.py | {
"start": 65,
"end": 662
} | class ____(object):
def isCovered(self, ranges, left, right):
"""
:type ranges: List[List[int]]
:type left: int
:type right: int
:rtype: bool
"""
RANGE_SIZE = 50
interval = [0]*(RANGE_SIZE+1)
for l, r in ranges:
interval[l-1] += 1
interval[(r-1)+1] -= 1
cnt = 0
for i in xrange((right-1)+1):
cnt += interval[i]
if i >= left-1 and not cnt:
return False
return True
# Time: O(nlogn)
# Space: O(1)
# if r is big, this is better
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/read_relationships/tutorial002.py | {
"start": 338,
"end": 4085
class ____(SQLModel, table=True):
    # Table model: one row per hero; optional many-to-one link to Team.
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(index=True)
    secret_name: str
    age: Optional[int] = Field(default=None, index=True)

    team_id: Optional[int] = Field(default=None, foreign_key="team.id")
    team: Optional[Team] = Relationship(back_populates="heroes")


sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"

# echo=True logs every SQL statement (tutorial setting).
engine = create_engine(sqlite_url, echo=True)


def create_db_and_tables():
    # Create all tables registered on SQLModel's metadata.
    SQLModel.metadata.create_all(engine)


def create_heroes():
    # Populate teams and heroes, demonstrating assignment via the `team`
    # relationship attribute and via `Team.heroes` list operations.
    with Session(engine) as session:
        team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
        team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")

        hero_deadpond = Hero(
            name="Deadpond", secret_name="Dive Wilson", team=team_z_force
        )
        hero_rusty_man = Hero(
            name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
        )
        hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
        session.add(hero_deadpond)
        session.add(hero_rusty_man)
        session.add(hero_spider_boy)
        session.commit()

        session.refresh(hero_deadpond)
        session.refresh(hero_rusty_man)
        session.refresh(hero_spider_boy)

        print("Created hero:", hero_deadpond)
        print("Created hero:", hero_rusty_man)
        print("Created hero:", hero_spider_boy)

        # Assign a team after creation through the relationship attribute.
        hero_spider_boy.team = team_preventers
        session.add(hero_spider_boy)
        session.commit()
        session.refresh(hero_spider_boy)
        print("Updated hero:", hero_spider_boy)

        hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
        hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
        # Create a team with its heroes in one go.
        team_wakaland = Team(
            name="Wakaland",
            headquarters="Wakaland Capital City",
            heroes=[hero_black_lion, hero_sure_e],
        )
        session.add(team_wakaland)
        session.commit()
        session.refresh(team_wakaland)
        print("Team Wakaland:", team_wakaland)

        hero_tarantula = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
        hero_dr_weird = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
        hero_cap = Hero(
            name="Captain North America", secret_name="Esteban Rogelios", age=93
        )
        # Append heroes to an existing team's relationship list.
        team_preventers.heroes.append(hero_tarantula)
        team_preventers.heroes.append(hero_dr_weird)
        team_preventers.heroes.append(hero_cap)
        session.add(team_preventers)
        session.commit()
        session.refresh(hero_tarantula)
        session.refresh(hero_dr_weird)
        session.refresh(hero_cap)
        print("Preventers new hero:", hero_tarantula)
        print("Preventers new hero:", hero_dr_weird)
        print("Preventers new hero:", hero_cap)


def select_heroes():
    # Read heroes through a team's relationship attribute.
    with Session(engine) as session:
        statement = select(Team).where(Team.name == "Preventers")
        result = session.exec(statement)
        team_preventers = result.one()

        print("Preventers heroes:", team_preventers.heroes)


def update_heroes():
    # Remove a hero from a team by setting the relationship to None.
    with Session(engine) as session:
        statement = select(Hero).where(Hero.name == "Spider-Boy")
        result = session.exec(statement)
        hero_spider_boy = result.one()

        hero_spider_boy.team = None
        session.add(hero_spider_boy)
        session.commit()

        session.refresh(hero_spider_boy)
        print("Spider-Boy without team:", hero_spider_boy)


def main():
    create_db_and_tables()
    create_heroes()
    select_heroes()
    update_heroes()


if __name__ == "__main__":
    main()
| Hero |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_comprehend.py | {
"start": 1394,
"end": 1621
class ____:
    # Shared base for Comprehend custom-waiter tests: each test method in a
    # subclass runs with a real boto3 "comprehend" client patched onto the hook.

    @pytest.fixture(autouse=True)
    def mock_conn(self, monkeypatch):
        # NOTE(review): assumes ComprehendHook exposes `conn` as a patchable
        # attribute/cached property — confirm against the hook implementation.
        self.client = boto3.client("comprehend")
        monkeypatch.setattr(ComprehendHook, "conn", self.client)
| TestComprehendCustomWaitersBase |
python | kamyu104__LeetCode-Solutions | Python/longest-semi-repeating-subarray.py | {
"start": 805,
"end": 1498
} | class ____(object):
def longestSubarray(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
cnt = collections.defaultdict(int)
result = left = repeat = 0
for right in xrange(len(nums)):
cnt[nums[right]] += 1
if cnt[nums[right]] == 2:
repeat += 1
while repeat > k:
if cnt[nums[left]] == 2:
repeat -= 1
cnt[nums[left]] -= 1
if not cnt[nums[left]]:
del cnt[nums[left]]
left += 1
result = max(result, right-left+1)
return result
| Solution2 |
python | google__jax | jax/_src/hijax.py | {
"start": 10134,
"end": 16112
class ____:
    # Set by subclass __init__ (validated below):
    in_avals: tuple[PyTreeOfAvals, ...]
    out_aval: PyTreeOfAvals
    params: dict[str, Hashable]

    def __init__(self):
        # Validate the subclass contract: all three attributes must be set,
        # and at most one of vjp_bwd / vjp_bwd_retval may be overridden.
        if not hasattr(self, 'in_avals'):
            raise AttributeError("subclass __init__ should set `self.in_avals`")
        if not hasattr(self, 'out_aval'):
            raise AttributeError("subclass __init__ should set `self.out_aval`")
        if not hasattr(self, 'params'):
            raise AttributeError("subclass __init__ should set `self.params`")
        if (type(self).vjp_bwd is not VJPHiPrimitive.vjp_bwd and
            type(self).vjp_bwd_retval is not VJPHiPrimitive.vjp_bwd_retval):
            raise AttributeError(f"subclass {type(self)} should not override both "
                                 "`vjp_bwd` and `vjp_bwd_retval`")
        # Cache flattened avals/treedefs for binding and unflattening results.
        self.in_avals_flat, self.in_tree = tree_flatten(self.in_avals)
        self.out_avals_flat, self.out_tree = tree_flatten(self.out_aval)
        # Expose params as attributes for convenience.
        self.__dict__.update(self.params)

    # Operation implementation in terms of lojax primitives
    def expand(self, *args):
        raise NotImplementedError(f"subclass {type(self)} must implement `expand`")

    def vjp_fwd(self, *args):
        raise NotImplementedError(f"for grad support, subclass {type(self)} must "
                                  "implement `vjp_fwd`")

    def vjp_bwd(self, res, outgrad, *arg_accums):
        # Default accumulator-style bwd implemented in terms of the
        # value-returning variant.
        args_grad = self.vjp_bwd_retval(res, outgrad)
        tree_map(lambda acc, leaf_grad: acc.accum(leaf_grad), arg_accums, args_grad)

    def vjp_bwd_retval(self, res, outgrad):
        # Classic API: returns values instead of using accumulators
        raise NotImplementedError(f"for grad support, subclass {type(self)} must "
                                  "implement `vjp_bwd` or `vjp_bwd_retval`")

    def batch(self, axis_data, args, dims):
        raise NotImplementedError(f"for vmap support, subclass {type(self)} must "
                                  "implement `batch`")

    def jvp(self, primals, tangents):
        raise NotImplementedError(f"for jvp support, subclass {type(self)} must "
                                  "implement `jvp`")

    def __call__(self, *args):
        # Flatten args, bind the generic hi-primitive, unflatten the result.
        args_flat = tree_leaves_checked(self.in_tree, args)
        ans_flat = call_hi_primitive_p.bind(*args_flat, prim=self)
        return tree_unflatten(self.out_tree, ans_flat)

    def check(self, *arg_tys):
        # subclass can optionally override this to add checking logic
        return

    def __repr__(self):
        return f"{self.__class__.__name__}[{self.params}]"

    def __hash__(self):
        return hash((self.__class__.__name__, tuple(self.params.items())))

    def __eq__(self, other):
        return type(self) is type(other) and self.params == other.params
def tree_leaves_checked(treedef_expected, tree):
    # Flatten `tree` and assert its structure matches the expected treedef.
    flat_vals, treedef_actual = tree_flatten(tree)
    assert treedef_actual == treedef_expected
    return flat_vals


# Generic primitive that carries a VJPHiPrimitive instance in its params.
call_hi_primitive_p = core.Primitive("call_hi_primitive")
call_hi_primitive_p.multiple_results = True
call_hi_primitive_p.is_high = lambda *args, prim: True  # type: ignore


@call_hi_primitive_p.def_abstract_eval
def _call_hi_primitive_abstract_eval(*_args, prim):
    return prim.out_avals_flat


def _call_hi_primitive_to_lojax(*args_flat, prim):
    # Lower to lojax by delegating to the hi-primitive's `expand`.
    args = tree_unflatten(prim.in_tree, args_flat)
    return tree_leaves_checked(prim.out_tree, prim.expand(*args))
call_hi_primitive_p.to_lojax = _call_hi_primitive_to_lojax


def _call_hi_primitive_batcher(axis_data, args_flat, dims_flat, prim):
    # vmap rule: delegate to the hi-primitive's `batch`.
    args = tree_unflatten(prim.in_tree, args_flat)
    dims = tree_unflatten(prim.in_tree, dims_flat)
    ans, dims = prim.batch(axis_data, args, dims)
    ans_flat = tree_leaves_checked(prim.out_tree, ans)
    dims_flat = prim.out_tree.flatten_up_to(dims)
    return ans_flat, dims_flat
batching.fancy_primitive_batchers[call_hi_primitive_p] = _call_hi_primitive_batcher


def _call_hi_primitive_linearize(nz_in, *args_flat, prim):
    # Linearization rule: run the user `vjp_fwd`, defer the linear part.
    assert all(nz_in)
    args = tree_unflatten(prim.in_tree, args_flat)
    ans, residuals = prim.vjp_fwd(*args)
    # TODO(dougalm): does the fwd/bwd API force us to assume the nzs_out are all False
    # (except in the case that all the nzs_in are True, which is handled in
    # LinearizeTrace.ProcessPrimitive)?
    ans_flat = tree_leaves_checked(prim.out_tree, ans)
    nzs_out = [True for _ in ans_flat]
    return (ans_flat, nzs_out, residuals, partial(fake_linear_op, prim))

def fake_linear_op(prim, rs, *tangents):
    # Stand-in linear op: binds the linearized primitive on residuals+tangents.
    residuals_flat, residuals_tree = tree_flatten(rs)
    return call_hi_primitive_linearized_p.bind(*residuals_flat, *tangents,
                                               residuals_tree=residuals_tree, prim=prim)
ad.primitive_linearizations[call_hi_primitive_p] = _call_hi_primitive_linearize


call_hi_primitive_linearized_p = core.Primitive("call_hi_primitive_linearized")
call_hi_primitive_linearized_p.multiple_results = True
call_hi_primitive_linearized_p.is_high = lambda *args, prim, residuals_tree: True  # type: ignore

@call_hi_primitive_linearized_p.def_abstract_eval
def _call_hi_primitive_linearized_abstract_eval(*_args, prim, residuals_tree):
    return [t.to_tangent_aval() for t in prim.out_avals_flat]  # TODO(dougalm): handle nonzeros


def _call_hi_primitive_linearized_transpose(cts_flat, *args, prim, residuals_tree):
    # Transpose rule: split residuals from accumulators and call user `vjp_bwd`,
    # which writes gradients into the accumulators (and must return None).
    residuals_flat, accums_flat = split_list(args, [residuals_tree.num_leaves])
    residuals = tree_unflatten(residuals_tree, residuals_flat)
    accums = tree_unflatten(prim.in_tree, accums_flat)
    cts = tree_unflatten(prim.out_tree, cts_flat)
    none = prim.vjp_bwd(residuals, cts, *accums)
    assert none is None
ad.fancy_transposes[call_hi_primitive_linearized_p] = _call_hi_primitive_linearized_transpose


def _call_hi_primitive_jvp(primals, tangents, *, prim):
    # JVP rule: delegate to the hi-primitive's `jvp`.
    primals = tree_unflatten(prim.in_tree, primals)
    tangents = tree_unflatten(prim.in_tree, tangents)
    out_primals, out_tangents = prim.jvp(primals, tangents)
    out_primals_flat = tree_leaves_checked(prim.out_tree, out_primals)
    out_tangents_flat = prim.out_tree.flatten_up_to(out_tangents)
    return out_primals_flat, out_tangents_flat
ad.primitive_jvps[call_hi_primitive_p] = _call_hi_primitive_jvp
| VJPHiPrimitive |
python | dagster-io__dagster | python_modules/libraries/dagster-snowflake-polars/dagster_snowflake_polars/snowflake_polars_type_handler.py | {
"start": 5628,
"end": 12518
class ____(SnowflakeIOManager):
    """An I/O manager that stores and loads Polars DataFrames in Snowflake.

    Any inputs and outputs without type annotations are loaded as Polars
    DataFrames.

    Examples:
        .. code-block:: python

            from dagster_snowflake_polars import SnowflakePolarsIOManager
            from dagster import asset, Definitions, EnvVar
            import polars as pl

            @asset(
                key_prefix=["my_schema"],  # will be used as the schema in snowflake
            )
            def my_table() -> pl.DataFrame:  # the name of the asset will be the table name
                ...

            defs = Definitions(
                assets=[my_table],
                resources={
                    "io_manager": SnowflakePolarsIOManager(
                        database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT")
                    )
                },
            )

    The target schema is resolved, in order of precedence, from the asset/op
    metadata entry ``{"schema": ...}``, an asset ``key_prefix``, the I/O
    manager's ``schema`` config value, and finally the default ``"public"``.
    For ops, include a ``"schema"`` entry in the output metadata.

    To load only specific columns of a table in a downstream op or asset, add
    ``metadata={"columns": [...]}`` to the corresponding ``In``/``AssetIn``.
    """

    # Polars reads/writes go through the ADBC connector.
    connector: str = "adbc"

    @classmethod
    def _is_dagster_maintained(cls) -> bool:
        return False

    @staticmethod
    def type_handlers() -> Sequence[DbTypeHandler]:
        return [SnowflakePolarsTypeHandler()]

    @staticmethod
    def default_load_type() -> Optional[type]:
        return pl.DataFrame
@beta
@io_manager(config_schema=SnowflakePolarsIOManager.to_config_schema())
def snowflake_polars_io_manager(init_context):
    """Build an I/O manager that reads/writes Polars DataFrames in Snowflake.

    Any inputs and outputs without type annotations are loaded as Polars
    DataFrames.

    Returns:
        IOManagerDefinition

    Examples:
        .. code-block:: python

            from dagster_snowflake_polars import snowflake_polars_io_manager
            from dagster import asset, Definitions
            import polars as pl

            @asset(
                key_prefix=["my_schema"],  # will be used as the schema in snowflake
            )
            def my_table() -> pl.DataFrame:  # the name of the asset will be the table name
                ...

            defs = Definitions(
                assets=[my_table],
                resources={
                    "io_manager": snowflake_polars_io_manager.configured({
                        "database": "my_database",
                        "account": {"env": "SNOWFLAKE_ACCOUNT"},
                    })
                },
            )

    The target schema is resolved, in order of precedence, from the asset/op
    metadata entry ``{"schema": ...}``, an asset ``key_prefix``, the
    ``schema`` config value, and finally the default ``"public"``. For ops,
    include a ``"schema"`` entry in the output metadata.

    To load only specific columns of a table in a downstream op or asset, add
    ``metadata={"columns": [...]}`` to the corresponding ``In``/``AssetIn``.
    """
    resource_config = init_context.resource_config
    return DbIOManager(
        type_handlers=[SnowflakePolarsTypeHandler()],
        db_client=SnowflakeDbClient(),
        io_manager_name="SnowflakeIOManager",
        database=resource_config["database"],
        schema=resource_config.get("schema"),
        default_load_type=pl.DataFrame,
    )
| SnowflakePolarsIOManager |
python | keras-team__keras | keras/src/ops/image.py | {
"start": 311,
"end": 2714
class ____(Operation):
    """Symbolic op wrapping the backend RGB-to-grayscale conversion."""

    def __init__(self, data_format=None, *, name=None):
        super().__init__(name=name)
        self.data_format = backend.standardize_data_format(data_format)

    def call(self, images):
        return backend.image.rgb_to_grayscale(
            images, data_format=self.data_format
        )

    def compute_output_spec(self, images):
        out_shape = list(images.shape)
        if len(out_shape) not in (3, 4):
            raise ValueError(
                "Invalid images rank: expected rank 3 (single image) "
                "or rank 4 (batch of images). "
                f"Received: images.shape={out_shape}"
            )
        # The output keeps the input shape except for a single channel.
        channel_axis = -1 if self.data_format == "channels_last" else -3
        out_shape[channel_axis] = 1
        return KerasTensor(shape=out_shape, dtype=images.dtype)
@keras_export("keras.ops.image.rgb_to_grayscale")
def rgb_to_grayscale(images, data_format=None):
    """Convert RGB images to grayscale.

    Accepts a single image (rank 3) or a batch of images (rank 4) and returns
    a tensor of the same rank with a single channel.

    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape `(batch, channels, height, width)`.
            If not specified, the value will default to
            `keras.config.image_data_format`.

    Returns:
        Grayscale image or batch of grayscale images.

    Examples:

    >>> import numpy as np
    >>> from keras import ops
    >>> y = ops.image.rgb_to_grayscale(np.random.random((2, 4, 4, 3)))
    >>> y.shape
    (2, 4, 4, 1)

    >>> y = ops.image.rgb_to_grayscale(np.random.random((4, 4, 3)))
    >>> y.shape
    (4, 4, 1)

    >>> y = ops.image.rgb_to_grayscale(
    ...     np.random.random((2, 3, 4, 4)), data_format="channels_first")
    >>> y.shape
    (2, 1, 4, 4)
    """
    # Defer to the symbolic op when tracing; otherwise run eagerly.
    if any_symbolic_tensors((images,)):
        op = RGBToGrayscale(data_format=data_format)
        return op.symbolic_call(images)
    return backend.image.rgb_to_grayscale(images, data_format=data_format)
| RGBToGrayscale |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 88979,
"end": 89565
class ____(unittest.TestCase):
    # Tests for a factory that references itself (a tree node whose `parent`
    # is built by the same factory), ensuring recursion can be cut off.

    def test_no_parent(self):
        from .cyclic import self_ref

        # Stop the recursion three levels up: the chain ends with None.
        obj = self_ref.TreeElementFactory(parent__parent__parent=None)
        self.assertIsNone(obj.parent.parent.parent)

    def test_deep(self):
        from .cyclic import self_ref

        # Four levels deep: every intermediate ancestor is materialized and
        # only the last one is None.
        obj = self_ref.TreeElementFactory(parent__parent__parent__parent=None)
        self.assertIsNotNone(obj.parent)
        self.assertIsNotNone(obj.parent.parent)
        self.assertIsNotNone(obj.parent.parent.parent)
        self.assertIsNone(obj.parent.parent.parent.parent)
| SelfReferentialTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.