language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | plotly__plotly.py | plotly/graph_objs/funnel/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9949
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnel.marker.colorbar"
_path_str = "funnel.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.funnel.marker.
colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnel.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | ray-project__ray | python/ray/dashboard/modules/reporter/reporter_agent.py | {
"start": 10946,
"end": 66535
} | class ____(
dashboard_utils.DashboardAgentModule,
reporter_pb2_grpc.ReporterServiceServicer,
metrics_service_pb2_grpc.MetricsServiceServicer,
):
"""A monitor process for monitoring Ray nodes.
Attributes:
dashboard_agent: The DashboardAgent object contains global config
raylet_client: The RayletClient object to access raylet server
"""
def __init__(self, dashboard_agent, raylet_client=None):
"""Initialize the reporter object."""
super().__init__(dashboard_agent)
if IN_KUBERNETES_POD or IN_CONTAINER:
# psutil does not give a meaningful logical cpu count when in a K8s pod, or
# in a container in general.
# Use ray._private.utils for this instead.
logical_cpu_count = utils.get_num_cpus(override_docker_cpu_warning=True)
# (Override the docker warning to avoid dashboard log spam.)
# The dashboard expects a physical CPU count as well.
# This is not always meaningful in a container, but we will go ahead
# and give the dashboard what it wants using psutil.
physical_cpu_count = psutil.cpu_count(logical=False)
else:
logical_cpu_count = psutil.cpu_count()
physical_cpu_count = psutil.cpu_count(logical=False)
self._cpu_counts = (logical_cpu_count, physical_cpu_count)
self._gcs_client = dashboard_agent.gcs_client
self._ip = dashboard_agent.ip
self._log_dir = dashboard_agent.log_dir
self._is_head_node = self._ip == parse_address(dashboard_agent.gcs_address)[0]
self._hostname = socket.gethostname()
# (pid, created_time) -> psutil.Process
self._workers = {}
# psutil.Process of the parent.
self._raylet_proc = None
# psutil.Process of the current process.
self._agent_proc = None
# The last reported worker proc names (e.g., ray::*).
self._latest_worker_proc_names = set()
self._latest_gpu_worker_proc_names = set()
self._network_stats_hist = [(0, (0.0, 0.0))] # time, (sent, recv)
self._disk_io_stats_hist = [
(0, (0.0, 0.0, 0, 0))
] # time, (bytes read, bytes written, read ops, write ops)
self._metrics_collection_disabled = dashboard_agent.metrics_collection_disabled
self._metrics_agent = None
self._open_telemetry_metric_recorder = None
self._session_name = dashboard_agent.session_name
if not self._metrics_collection_disabled:
try:
stats_exporter = prometheus_exporter.new_stats_exporter(
prometheus_exporter.Options(
namespace="ray",
port=dashboard_agent.metrics_export_port,
address="127.0.0.1" if self._ip == "127.0.0.1" else "",
)
)
except Exception:
# TODO(SongGuyang): Catch the exception here because there is
# port conflict issue which brought from static port. We should
# remove this after we find better port resolution.
logger.exception(
"Failed to start prometheus stats exporter. Agent will stay "
"alive but disable the stats."
)
stats_exporter = None
self._metrics_agent = MetricsAgent(
stats_module.stats.view_manager,
stats_module.stats.stats_recorder,
stats_exporter,
)
self._open_telemetry_metric_recorder = OpenTelemetryMetricRecorder()
if self._metrics_agent.proxy_exporter_collector:
# proxy_exporter_collector is None
# if Prometheus server is not started.
REGISTRY.register(self._metrics_agent.proxy_exporter_collector)
self._key = (
f"{reporter_consts.REPORTER_PREFIX}" f"{self._dashboard_agent.node_id}"
)
self._executor = ThreadPoolExecutor(
max_workers=RAY_DASHBOARD_REPORTER_AGENT_TPE_MAX_WORKERS,
thread_name_prefix="reporter_agent_executor",
)
self._gcs_pid = None
self._gcs_proc = None
self._gpu_profiling_manager = GpuProfilingManager(
profile_dir_path=self._log_dir, ip_address=self._ip
)
self._gpu_profiling_manager.start_monitoring_daemon()
# Create GPU metric provider instance
self._gpu_metric_provider = GpuMetricProvider()
if raylet_client:
self._raylet_client = raylet_client
else:
self._raylet_client = RayletClient(
ip_address=self._ip, port=self._dashboard_agent.node_manager_port
)
async def GetTraceback(self, request, context):
pid = request.pid
native = request.native
p = CpuProfilingManager(self._log_dir)
success, output = await p.trace_dump(pid, native=native)
return reporter_pb2.GetTracebackReply(output=output, success=success)
async def CpuProfiling(self, request, context):
pid = request.pid
duration = request.duration
format = request.format
native = request.native
p = CpuProfilingManager(self._log_dir)
success, output = await p.cpu_profile(
pid, format=format, duration=duration, native=native
)
return reporter_pb2.CpuProfilingReply(output=output, success=success)
async def GpuProfiling(self, request, context):
pid = request.pid
num_iterations = request.num_iterations
success, output = await self._gpu_profiling_manager.gpu_profile(
pid=pid, num_iterations=num_iterations
)
return reporter_pb2.GpuProfilingReply(success=success, output=output)
async def MemoryProfiling(self, request, context):
pid = request.pid
format = request.format
leaks = request.leaks
duration = request.duration
native = request.native
trace_python_allocators = request.trace_python_allocators
p = MemoryProfilingManager(self._log_dir)
success, profiler_filename, output = await p.attach_profiler(
pid, native=native, trace_python_allocators=trace_python_allocators
)
if not success:
return reporter_pb2.MemoryProfilingReply(output=output, success=success)
# add 1 second sleep for memray overhead
await asyncio.sleep(duration + 1)
success, output = await p.detach_profiler(pid)
warning = None if success else output
success, output = await p.get_profile_result(
pid, profiler_filename=profiler_filename, format=format, leaks=leaks
)
return reporter_pb2.MemoryProfilingReply(
output=output, success=success, warning=warning
)
async def HealthCheck(
self,
_request: reporter_pb2.HealthCheckRequest,
_context: ServicerContext,
) -> reporter_pb2.HealthCheckReply:
"""This is a health check endpoint for the reporter agent.
It is used to check if the reporter agent is ready to receive requests.
"""
return reporter_pb2.HealthCheckReply()
async def ReportOCMetrics(self, request, context):
# Do nothing if metrics collection is disabled.
if self._metrics_collection_disabled:
return reporter_pb2.ReportOCMetricsReply()
# This function receives a GRPC containing OpenCensus (OC) metrics
# from a Ray process, then exposes those metrics to Prometheus.
try:
worker_id = WorkerID(request.worker_id)
worker_id = None if worker_id.is_nil() else worker_id.hex()
self._metrics_agent.proxy_export_metrics(request.metrics, worker_id)
except Exception:
logger.error(traceback.format_exc())
return reporter_pb2.ReportOCMetricsReply()
def _export_histogram_data(
self,
metric: Metric,
) -> None:
"""
TODO(can-anyscale): once we launch the new open-telemetry stack, we need to
document and communicate that the histogram metric is an approximation to users.
The approximation is good enough for the dashboard to display the histogram
distribution. Only the sum of all data points will be the approximation. See
https://github.com/ray-project/ray/issues/54538 for the complete backlog of Ray
metric infra improvements.
Export histogram data points to OpenTelemetry Metric Recorder. A histogram
metric is aggregated into several internal representations in C++ side:
- sum of all buckets
- count of all buckets
- count per bucket
We reconstruct the histogram data points from these internal representations
and export them to OpenTelemetry Metric Recorder. The reconstruction is an
approximation, but it is good enough for the dashboard to display the histogram
data points.
"""
data_points = metric.histogram.data_points
if not data_points:
return
self._open_telemetry_metric_recorder.register_histogram_metric(
metric.name,
metric.description,
data_points[0].explicit_bounds,
)
for data_point in data_points:
if data_point.count == 0:
continue
bucket_midpoints = (
self._open_telemetry_metric_recorder.get_histogram_bucket_midpoints(
metric.name
)
)
assert len(bucket_midpoints) == len(data_point.bucket_counts)
tags = {tag.key: tag.value.string_value for tag in data_point.attributes}
for i, bucket_count in enumerate(data_point.bucket_counts):
if bucket_count == 0:
continue
bucket_midpoint = bucket_midpoints[i]
for _ in range(bucket_count):
self._open_telemetry_metric_recorder.set_metric_value(
metric.name,
tags,
bucket_midpoint,
)
def _export_number_data(
self,
metric: Metric,
) -> None:
data_points = []
if metric.WhichOneof("data") == "gauge":
self._open_telemetry_metric_recorder.register_gauge_metric(
metric.name,
metric.description,
)
data_points = metric.gauge.data_points
if metric.WhichOneof("data") == "sum":
if metric.sum.is_monotonic:
self._open_telemetry_metric_recorder.register_counter_metric(
metric.name,
metric.description,
)
else:
self._open_telemetry_metric_recorder.register_sum_metric(
metric.name,
metric.description,
)
data_points = metric.sum.data_points
for data_point in data_points:
self._open_telemetry_metric_recorder.set_metric_value(
metric.name,
{tag.key: tag.value.string_value for tag in data_point.attributes},
# Note that all data points received from other Ray components are
# always double values. This is because the c++ apis
# (open_telemetry_metric_recorder.cc) only create metrics with double
# values.
data_point.as_double,
)
async def Export(
self,
request: metrics_service_pb2.ExportMetricsServiceRequest,
context: ServicerContext,
) -> metrics_service_pb2.ExportMetricsServiceResponse:
"""
GRPC method that receives the open telemetry metrics exported from other Ray
components running in the same node (e.g., raylet, worker, etc.). This method
implements an interface of `metrics_service_pb2_grpc.MetricsServiceServicer` (https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/collector/metrics/v1/metrics_service.proto#L30),
which is the default open-telemetry metrics service interface.
"""
for resource_metrics in request.resource_metrics:
for scope_metrics in resource_metrics.scope_metrics:
for metric in scope_metrics.metrics:
if metric.WhichOneof("data") == "histogram":
self._export_histogram_data(metric)
else:
self._export_number_data(metric)
return metrics_service_pb2.ExportMetricsServiceResponse()
@staticmethod
def _get_cpu_percent(in_k8s: bool):
if in_k8s:
return k8s_utils.cpu_percent()
else:
return psutil.cpu_percent()
def _get_gpu_usage(self):
"""Get GPU usage information using the GPU metric provider."""
return self._gpu_metric_provider.get_gpu_usage()
@staticmethod
def _get_tpu_usage() -> List[TpuUtilizationInfo]:
global enable_tpu_usage_check
if not enable_tpu_usage_check:
return []
if not TPU_DEVICE_PLUGIN_ADDR:
enable_tpu_usage_check = False
return []
endpoint = f"http://{TPU_DEVICE_PLUGIN_ADDR}/metrics"
try:
metrics = requests.get(endpoint).content
metrics = metrics.decode("utf-8")
except Exception as e:
logger.debug(
f"Failed to retrieve TPU information from device plugin: {endpoint} {e}"
)
enable_tpu_usage_check = False
return []
tpu_utilizations = []
# Sample should look like:
# Name: tensorcore_utilization_node Labels: {'accelerator_id': '4804690994094478883-0', 'make': 'cloud-tpu', 'model': 'tpu-v6e-slice', 'tpu_topology': '2x4'} Value: 0.0
# See https://cloud.google.com/monitoring/api/metrics_gcp#gcp-tpu for
# schema.
try:
for family in text_string_to_metric_families(metrics):
for sample in family.samples:
# Skip irrelevant metrics
if not hasattr(sample, "labels"):
continue
if "accelerator_id" not in sample.labels:
continue
labels = sample.labels
accelerator_id = labels["accelerator_id"]
index = accelerator_id.split("-")[1]
if sample.name == "memory_bandwidth_utilization":
info = TpuUtilizationInfo(
index=index,
name=accelerator_id,
tpu_type=labels["model"],
tpu_topology=labels["tpu_topology"],
tensorcore_utilization=0.0,
hbm_utilization=sample.value,
duty_cycle=0.0,
memory_used=0,
memory_total=0,
)
tpu_utilizations.append(info)
if sample.name == "tensorcore_utilization":
info = TpuUtilizationInfo(
index=index,
name=accelerator_id,
tpu_type=labels["model"],
tpu_topology=labels["tpu_topology"],
tensorcore_utilization=sample.value,
hbm_utilization=0.0,
duty_cycle=0.0,
memory_used=0,
memory_total=0,
)
tpu_utilizations.append(info)
if sample.name == "duty_cycle":
info = TpuUtilizationInfo(
index=index,
name=accelerator_id,
tpu_type=labels["model"],
tpu_topology=labels["tpu_topology"],
tensorcore_utilization=0.0,
hbm_utilization=0.0,
duty_cycle=sample.value,
memory_used=0,
memory_total=0,
)
tpu_utilizations.append(info)
if sample.name == "memory_used":
info = TpuUtilizationInfo(
index=index,
name=accelerator_id,
tpu_type=labels["model"],
tpu_topology=labels["tpu_topology"],
tensorcore_utilization=0.0,
hbm_utilization=0.0,
duty_cycle=0.0,
memory_used=sample.value,
memory_total=0,
)
tpu_utilizations.append(info)
if sample.name == "memory_total":
info = TpuUtilizationInfo(
index=index,
name=accelerator_id,
tpu_type=labels["model"],
tpu_topology=labels["tpu_topology"],
tensorcore_utilization=0.0,
hbm_utilization=0.0,
duty_cycle=0.0,
memory_used=0,
memory_total=sample.value,
)
tpu_utilizations.append(info)
except Exception as e:
logger.debug(f"Failed to parse metrics from device plugin: {metrics} {e}")
return []
# Each collected sample records only one metric (e.g. duty cycle) during
# the metric interval for one TPU. So here we need to aggregate the
# sample records together. The aggregated list should be indexed by the
# TPU accelerator index.
merged_tpu_utilizations = {}
for info in tpu_utilizations:
index = int(info.get("index"))
if index in merged_tpu_utilizations:
merged_info = merged_tpu_utilizations[index]
merged_info["tensorcore_utilization"] += info.get(
"tensorcore_utilization"
)
merged_info["hbm_utilization"] += info.get("hbm_utilization")
merged_info["duty_cycle"] += info.get("duty_cycle")
merged_info["memory_used"] += info.get("memory_used")
merged_info["memory_total"] += info.get("memory_total")
else:
merged_info = TpuUtilizationInfo(
index=info.get("index"),
name=info.get("name"),
tpu_type=info.get("tpu_type"),
tpu_topology=info.get("tpu_topology"),
tensorcore_utilization=info.get("tensorcore_utilization"),
hbm_utilization=info.get("hbm_utilization"),
duty_cycle=info.get("duty_cycle"),
memory_used=info.get("memory_used"),
memory_total=info.get("memory_total"),
)
merged_tpu_utilizations[index] = merged_info
sorted_tpu_utilizations = [
value for _, value in sorted(merged_tpu_utilizations.items())
]
return sorted_tpu_utilizations
@staticmethod
def _get_boot_time():
if IN_KUBERNETES_POD:
# Return start time of container entrypoint
return psutil.Process(pid=1).create_time()
else:
return psutil.boot_time()
@staticmethod
def _get_network_stats():
ifaces = [
v for k, v in psutil.net_io_counters(pernic=True).items() if k[0] == "e"
]
sent = sum((iface.bytes_sent for iface in ifaces))
recv = sum((iface.bytes_recv for iface in ifaces))
return sent, recv
@staticmethod
def _get_mem_usage():
total = get_system_memory()
used = utils.get_used_memory()
available = total - used
percent = round(used / total, 3) * 100
return total, available, percent, used
@staticmethod
def _get_disk_usage():
if IN_KUBERNETES_POD and not ENABLE_K8S_DISK_USAGE:
# If in a K8s pod, disable disk display by passing in dummy values.
return {
"/": psutil._common.sdiskusage(total=1, used=0, free=1, percent=0.0)
}
if sys.platform == "win32":
root = psutil.disk_partitions()[0].mountpoint
else:
root = os.sep
tmp = get_user_temp_dir()
return {
"/": psutil.disk_usage(root),
tmp: psutil.disk_usage(tmp),
}
@staticmethod
def _get_disk_io_stats():
stats = psutil.disk_io_counters()
# stats can be None or {} if the machine is diskless.
# https://psutil.readthedocs.io/en/latest/#psutil.disk_io_counters
if not stats:
return (0, 0, 0, 0)
else:
return (
stats.read_bytes,
stats.write_bytes,
stats.read_count,
stats.write_count,
)
async def _async_get_worker_pids_from_raylet(self) -> List[int]:
try:
# Get worker pids from raylet via gRPC.
return await self._raylet_client.async_get_worker_pids()
except (GetTimeoutError, RpcError):
logger.exception("Failed to get worker pids from raylet")
return []
def _get_agent_proc(self) -> psutil.Process:
# Agent is the current process.
# This method is not necessary, but we have it for mock testing.
return psutil.Process()
def _generate_worker_key(self, proc: psutil.Process) -> Tuple[int, float]:
return (proc.pid, proc.create_time())
async def _async_get_worker_processes(self):
pids = await self._async_get_worker_pids_from_raylet()
logger.debug(f"Worker PIDs from raylet: {pids}")
if not pids:
return []
workers = {}
for pid in pids:
try:
proc = psutil.Process(pid)
workers[self._generate_worker_key(proc)] = proc
except (psutil.NoSuchProcess, psutil.AccessDenied):
logger.error(f"Failed to access worker process {pid}")
continue
return workers
async def _async_get_workers(self, gpus: Optional[List[GpuUtilizationInfo]] = None):
workers = await self._async_get_worker_processes()
if not workers:
return []
else:
# We should keep `raylet_proc.children()` in `self` because
# when `cpu_percent` is first called, it returns the meaningless 0.
# See more: https://github.com/ray-project/ray/issues/29848
keys_to_pop = []
# Add all new workers.
for key, worker in workers.items():
if key not in self._workers:
self._workers[key] = worker
# Pop out stale workers.
for key in self._workers:
if key not in workers:
keys_to_pop.append(key)
for k in keys_to_pop:
self._workers.pop(k)
# Build process ID -> GPU info mapping for faster lookups
gpu_pid_mapping = defaultdict(list)
if gpus is not None:
for gpu in gpus:
processes = gpu.get("processes_pids")
if processes:
for proc in processes.values():
gpu_pid_mapping[proc["pid"]].append(proc)
result = []
for w in self._workers.values():
try:
if w.status() == psutil.STATUS_ZOMBIE:
continue
# Get basic process info
worker_info = w.as_dict(attrs=PSUTIL_PROCESS_ATTRS)
# Add GPU information if available
worker_pid = worker_info["pid"]
gpu_memory_usage = 0
gpu_utilization = 0
if worker_pid in gpu_pid_mapping:
# Aggregate GPU memory and utilization across all GPUs for this process
for gpu_proc in gpu_pid_mapping[worker_pid]:
gpu_memory_usage += gpu_proc["gpu_memory_usage"]
utilization = gpu_proc["gpu_utilization"] or 0
gpu_utilization += utilization
# Add GPU information to worker info
worker_info["gpu_memory_usage"] = gpu_memory_usage # in MB
worker_info["gpu_utilization"] = gpu_utilization # percentage
result.append(worker_info)
except psutil.NoSuchProcess:
# the process may have terminated due to race condition.
continue
return result
def _get_raylet_proc(self):
try:
if not self._raylet_proc:
curr_proc = psutil.Process()
# The dashboard agent is a child of the raylet process.
# It is not necessarily the direct child (python-windows
# typically uses a py.exe runner to run python), so search
# up for a process named 'raylet'
candidate = curr_proc.parent()
while candidate:
if "raylet" in candidate.name():
break
candidate = candidate.parent()
self._raylet_proc = candidate
if self._raylet_proc is not None:
if self._raylet_proc.pid == 1:
return None
if self._raylet_proc.status() == psutil.STATUS_ZOMBIE:
return None
return self._raylet_proc
except (psutil.AccessDenied, ProcessLookupError):
pass
return None
def _get_gcs(self):
if self._gcs_pid:
if not self._gcs_proc or self._gcs_pid != self._gcs_proc.pid:
self._gcs_proc = psutil.Process(self._gcs_pid)
if self._gcs_proc:
dictionary = self._gcs_proc.as_dict(attrs=PSUTIL_PROCESS_ATTRS)
return dictionary
return {}
def _get_raylet(self):
raylet_proc = self._get_raylet_proc()
if raylet_proc is None:
return None
else:
return raylet_proc.as_dict(attrs=PSUTIL_PROCESS_ATTRS)
def _get_agent(self):
# Current proc == agent proc
if not self._agent_proc:
self._agent_proc = psutil.Process()
return self._agent_proc.as_dict(attrs=PSUTIL_PROCESS_ATTRS)
def _get_load_avg(self):
if sys.platform == "win32":
cpu_percent = psutil.cpu_percent()
load = (cpu_percent, cpu_percent, cpu_percent)
else:
load = os.getloadavg()
if self._cpu_counts[0] > 0:
per_cpu_load = tuple((round(x / self._cpu_counts[0], 2) for x in load))
else:
per_cpu_load = None
return load, per_cpu_load
@staticmethod
def _compute_speed_from_hist(hist):
while len(hist) > 7:
hist.pop(0)
then, prev_stats = hist[0]
now, now_stats = hist[-1]
time_delta = now - then
return tuple((y - x) / time_delta for x, y in zip(prev_stats, now_stats))
def _get_shm_usage(self):
"""Return the shm usage.
If shm doesn't exist (e.g., MacOS), it returns None.
"""
mem = psutil.virtual_memory()
if not hasattr(mem, "shared"):
return None
return mem.shared
async def _async_collect_stats(self):
now = dashboard_utils.to_posix_time(datetime.datetime.utcnow())
network_stats = self._get_network_stats()
self._network_stats_hist.append((now, network_stats))
network_speed_stats = self._compute_speed_from_hist(self._network_stats_hist)
disk_stats = self._get_disk_io_stats()
self._disk_io_stats_hist.append((now, disk_stats))
disk_speed_stats = self._compute_speed_from_hist(self._disk_io_stats_hist)
gpus = self._get_gpu_usage()
raylet = self._get_raylet()
stats = {
"now": now,
"hostname": self._hostname,
"ip": self._ip,
"cpu": self._get_cpu_percent(IN_KUBERNETES_POD),
"cpus": self._cpu_counts,
"mem": self._get_mem_usage(),
# Unit is in bytes. None if
"shm": self._get_shm_usage(),
"workers": await self._async_get_workers(gpus),
"raylet": raylet,
"agent": self._get_agent(),
"bootTime": self._get_boot_time(),
"loadAvg": self._get_load_avg(),
"disk": self._get_disk_usage(),
"disk_io": disk_stats,
"disk_io_speed": disk_speed_stats,
"gpus": gpus,
"tpus": self._get_tpu_usage(),
"network": network_stats,
"network_speed": network_speed_stats,
# Deprecated field, should be removed with frontend.
"cmdline": raylet.get("cmdline", []) if raylet else [],
}
if self._is_head_node:
stats["gcs"] = self._get_gcs()
return stats
def _generate_reseted_stats_record(self, component_name: str) -> List[Record]:
"""Return a list of Record that will reset
the system metrics of a given component name.
Args:
component_name: a component name for a given stats.
Returns:
a list of Record instances of all values 0.
"""
tags = {"ip": self._ip, "Component": component_name}
records = []
records.append(
Record(
gauge=METRICS_GAUGES["component_cpu_percentage"],
value=0.0,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_mem_shared_bytes"],
value=0.0,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_rss_mb"],
value=0.0,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_uss_mb"],
value=0.0,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_num_fds"],
value=0,
tags=tags,
)
)
return records
def _generate_system_stats_record(
self, stats: List[dict], component_name: str, pid: Optional[str] = None
) -> List[Record]:
"""Generate a list of Record class from a given component names.
Args:
stats: a list of stats dict generated by `psutil.as_dict`.
If empty, it will create the metrics of a given "component_name"
which has all 0 values.
component_name: a component name for a given stats.
pid: optionally provided pids.
Returns:
a list of Record class that will be exposed to Prometheus.
"""
total_cpu_percentage = 0.0
total_gpu_percentage = 0.0
total_gpu_memory = 0.0
total_rss = 0.0
total_uss = 0.0
total_shm = 0.0
total_num_fds = 0
for stat in stats:
total_cpu_percentage += float(stat.get("cpu_percent", 0.0)) # noqa
# Aggregate GPU stats if available
total_gpu_percentage += float(stat.get("gpu_utilization", 0.0))
total_gpu_memory += float(stat.get("gpu_memory_usage", 0.0))
memory_info = stat.get("memory_info")
if memory_info:
mem = stat["memory_info"]
total_rss += float(mem.rss) / 1.0e6
if hasattr(mem, "shared"):
total_shm += float(mem.shared)
mem_full_info = stat.get("memory_full_info")
if mem_full_info is not None:
total_uss += float(mem_full_info.uss) / 1.0e6
total_num_fds += int(stat.get("num_fds", 0))
tags = {"ip": self._ip, "Component": component_name}
if pid:
tags["pid"] = pid
records = []
records.append(
Record(
gauge=METRICS_GAUGES["component_cpu_percentage"],
value=total_cpu_percentage,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_mem_shared_bytes"],
value=total_shm,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_rss_mb"],
value=total_rss,
tags=tags,
)
)
if total_uss > 0.0:
records.append(
Record(
gauge=METRICS_GAUGES["component_uss_mb"],
value=total_uss,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_num_fds"],
value=total_num_fds,
tags=tags,
)
)
# Add GPU records if there's GPU usage
if total_gpu_memory > 0.0:
records.append(
Record(
gauge=METRICS_GAUGES["component_gpu_memory_mb"],
value=total_gpu_memory,
tags=tags,
)
)
if total_gpu_percentage > 0.0:
records.append(
Record(
gauge=METRICS_GAUGES["component_gpu_percentage"],
value=total_gpu_percentage,
tags=tags,
)
)
return records
def _generate_reseted_gpu_stats_record(self, component_name: str) -> List[Record]:
    """Build records that zero out the GPU metrics of a component.

    Args:
        component_name: name of the component whose GPU gauges should
            be reset (e.g. a finished ray::* worker process name).

    Returns:
        A list of Record instances for the GPU gauges, all with value 0.
    """
    tags = {"ip": self._ip, "Component": component_name}
    # One zeroed record per GPU gauge, in a fixed order.
    return [
        Record(
            gauge=METRICS_GAUGES[gauge_name],
            value=0.0,
            tags=tags,
        )
        for gauge_name in ("component_gpu_memory_mb", "component_gpu_percentage")
    ]
def generate_worker_stats_record(self, worker_stats: List[dict]) -> List[Record]:
    """Generate a list of Record class for worker processes.

    This API automatically sets the component_name of record as
    the name of worker processes. I.e., ray::* so that we can report
    per task/actor (grouped by a func/class name) resource usages.

    Args:
        worker_stats: a list of stats dict generated by `psutil.as_dict`
            for worker processes, including GPU usage information.

    Returns:
        Records for every live worker process, plus reset records for
        workers (and GPU-using workers) that have since gone away.
    """
    # worker cmd name (ray::*) -> list of stats dicts for that name.
    stats_by_proc_name = defaultdict(list)
    # Process names currently reporting any GPU usage.
    gpu_proc_names = set()
    for stat in worker_stats:
        cmdline = stat.get("cmdline")
        if not cmdline:
            # No command line available; skip. Both worker and driver
            # stats are otherwise collected here.
            continue
        proc_name = cmdline[0]
        stats_by_proc_name[proc_name].append(stat)
        uses_gpu = (
            stat.get("gpu_memory_usage", 0) > 0
            or stat.get("gpu_utilization", 0) > 0
        )
        if uses_gpu:
            gpu_proc_names.add(proc_name)

    records = []
    # System stats records (these include GPU stats) per process name.
    for proc_name, stats in stats_by_proc_name.items():
        records.extend(self._generate_system_stats_record(stats, proc_name))

    # Zero out metrics for worker processes that have finished.
    current_names = set(stats_by_proc_name)
    stale_names = self._latest_worker_proc_names - current_names
    self._latest_worker_proc_names = current_names
    for name in stale_names:
        records.extend(self._generate_reseted_stats_record(name))

    # Zero out GPU metrics for processes that no longer use the GPU.
    stale_gpu_names = self._latest_gpu_worker_proc_names - gpu_proc_names
    self._latest_gpu_worker_proc_names = gpu_proc_names
    for name in stale_gpu_names:
        records.extend(self._generate_reseted_gpu_stats_record(name))
    return records
def _to_records(self, stats: dict, cluster_stats: dict) -> List[Record]:
    """Convert one round of collected stats into a flat list of Records.

    Args:
        stats: per-node physical stats dict. Keys read here include
            "ip", "cpu", "cpus", "mem", "shm", "gpus", "tpus", "disk",
            "disk_io", "disk_io_speed", "network", "network_speed",
            "gcs", "raylet", "workers" and "agent". The exact value
            shapes are assumed from the accesses below -- confirm
            against the stats-collection code that produces them.
        cluster_stats: autoscaler-level stats; cluster-wide gauges are
            emitted only on the head node and only when an
            "autoscaler_report" entry is present.

    Returns:
        Record instances covering cluster, node, GPU/TPU, disk,
        network and per-component metrics, ready for export.
    """
    records_reported = []
    ip = stats["ip"]
    ray_node_type = "head" if self._is_head_node else "worker"
    # String-valued so it can be used directly as a metric tag.
    is_head_node = "true" if self._is_head_node else "false"

    # Common tags for node-level metrics
    # We use RayNodeType to mark head/worker node, IsHeadNode is retained for backward compatibility
    node_tags = {"ip": ip, "RayNodeType": ray_node_type, "IsHeadNode": is_head_node}

    # -- Instance count of cluster --
    # Only report cluster stats on head node
    if "autoscaler_report" in cluster_stats and self._is_head_node:
        active_nodes = cluster_stats["autoscaler_report"]["active_nodes"]
        for node_type, active_node_count in active_nodes.items():
            records_reported.append(
                Record(
                    gauge=METRICS_GAUGES["cluster_active_nodes"],
                    value=active_node_count,
                    tags={"node_type": node_type},
                )
            )

        # Failed nodes arrive as (ip, node_type) pairs; tally per type.
        failed_nodes = cluster_stats["autoscaler_report"]["failed_nodes"]
        failed_nodes_dict = {}
        for node_ip, node_type in failed_nodes:
            if node_type in failed_nodes_dict:
                failed_nodes_dict[node_type] += 1
            else:
                failed_nodes_dict[node_type] = 1
        for node_type, failed_node_count in failed_nodes_dict.items():
            records_reported.append(
                Record(
                    gauge=METRICS_GAUGES["cluster_failed_nodes"],
                    value=failed_node_count,
                    tags={"node_type": node_type},
                )
            )

        # Pending nodes arrive as (ip, node_type, status_message)
        # triples; tally per type.
        pending_nodes = cluster_stats["autoscaler_report"]["pending_nodes"]
        pending_nodes_dict = {}
        for node_ip, node_type, status_message in pending_nodes:
            if node_type in pending_nodes_dict:
                pending_nodes_dict[node_type] += 1
            else:
                pending_nodes_dict[node_type] = 1
        for node_type, pending_node_count in pending_nodes_dict.items():
            records_reported.append(
                Record(
                    gauge=METRICS_GAUGES["cluster_pending_nodes"],
                    value=pending_node_count,
                    tags={"node_type": node_type},
                )
            )

    # -- CPU per node --
    cpu_usage = float(stats["cpu"])
    cpu_record = Record(
        gauge=METRICS_GAUGES["node_cpu_utilization"],
        value=cpu_usage,
        tags=node_tags,
    )
    cpu_count, _ = stats["cpus"]
    cpu_count_record = Record(
        gauge=METRICS_GAUGES["node_cpu_count"], value=cpu_count, tags=node_tags
    )

    # -- Mem per node --
    mem_total, mem_available, _, mem_used = stats["mem"]
    mem_used_record = Record(
        gauge=METRICS_GAUGES["node_mem_used"], value=mem_used, tags=node_tags
    )
    mem_available_record = Record(
        gauge=METRICS_GAUGES["node_mem_available"],
        value=mem_available,
        tags=node_tags,
    )
    mem_total_record = Record(
        gauge=METRICS_GAUGES["node_mem_total"], value=mem_total, tags=node_tags
    )

    # Shared-memory gauge is only emitted when a value was collected.
    shm_used = stats["shm"]
    if shm_used:
        node_mem_shared = Record(
            gauge=METRICS_GAUGES["node_mem_shared_bytes"],
            value=shm_used,
            tags=node_tags,
        )
        records_reported.append(node_mem_shared)

    # The output example of GpuUtilizationInfo.
    """
    {'index': 0,
    'uuid': 'GPU-36e1567d-37ed-051e-f8ff-df807517b396',
    'name': 'NVIDIA A10G',
    'utilization_gpu': 1,
    'memory_used': 0,
    'memory_total': 22731}
    """
    # -- GPU per node --
    gpus = stats["gpus"]
    gpus_available = len(gpus)

    if gpus_available:
        for gpu in gpus:
            gpus_utilization, gram_used, gram_total = 0, 0, 0
            # Consume GPU may not report its utilization.
            if gpu["utilization_gpu"] is not None:
                gpus_utilization += gpu["utilization_gpu"]
            gram_used += gpu["memory_used"]
            gram_total += gpu["memory_total"]
            gpu_index = gpu.get("index")
            gpu_name = gpu.get("name")

            gram_available = gram_total - gram_used

            # Per-GPU records are only emitted when the GPU reports an
            # index; each record is tagged with that index (and device
            # name, when available).
            if gpu_index is not None:
                gpu_tags = {**node_tags, "GpuIndex": str(gpu_index)}
                if gpu_name:
                    gpu_tags["GpuDeviceName"] = gpu_name

                # There's only 1 GPU per each index, so we record 1 here.
                gpus_available_record = Record(
                    gauge=METRICS_GAUGES["node_gpus_available"],
                    value=1,
                    tags=gpu_tags,
                )
                gpus_utilization_record = Record(
                    gauge=METRICS_GAUGES["node_gpus_utilization"],
                    value=gpus_utilization,
                    tags=gpu_tags,
                )
                gram_used_record = Record(
                    gauge=METRICS_GAUGES["node_gram_used"],
                    value=gram_used,
                    tags=gpu_tags,
                )
                gram_available_record = Record(
                    gauge=METRICS_GAUGES["node_gram_available"],
                    value=gram_available,
                    tags=gpu_tags,
                )

                records_reported.extend(
                    [
                        gpus_available_record,
                        gpus_utilization_record,
                        gram_used_record,
                        gram_available_record,
                    ]
                )

    # -- TPU per node --
    tpus = stats["tpus"]
    for tpu in tpus:
        tpu_index = tpu.get("index")
        tpu_name = tpu.get("name")
        tpu_type = tpu.get("tpu_type")
        tpu_topology = tpu.get("tpu_topology")
        tensorcore_utilization = tpu.get("tensorcore_utilization")
        hbm_utilization = tpu.get("hbm_utilization")
        duty_cycle = tpu.get("duty_cycle")
        memory_used = tpu.get("memory_used")
        memory_total = tpu.get("memory_total")

        tpu_tags = {
            **node_tags,
            "TpuIndex": str(tpu_index),
            "TpuDeviceName": tpu_name,
            "TpuType": tpu_type,
            "TpuTopology": tpu_topology,
        }
        tensorcore_utilization_record = Record(
            gauge=METRICS_GAUGES["tpu_tensorcore_utilization"],
            value=tensorcore_utilization,
            tags=tpu_tags,
        )
        hbm_utilization_record = Record(
            gauge=METRICS_GAUGES["tpu_memory_bandwidth_utilization"],
            value=hbm_utilization,
            tags=tpu_tags,
        )
        duty_cycle_record = Record(
            gauge=METRICS_GAUGES["tpu_duty_cycle"],
            value=duty_cycle,
            tags=tpu_tags,
        )
        memory_used_record = Record(
            gauge=METRICS_GAUGES["tpu_memory_used"],
            value=memory_used,
            tags=tpu_tags,
        )
        memory_total_record = Record(
            gauge=METRICS_GAUGES["tpu_memory_total"],
            value=memory_total,
            tags=tpu_tags,
        )

        records_reported.extend(
            [
                tensorcore_utilization_record,
                hbm_utilization_record,
                duty_cycle_record,
                memory_used_record,
                memory_total_record,
            ]
        )

    # -- Disk per node --
    disk_io_stats = stats["disk_io"]
    disk_read_record = Record(
        gauge=METRICS_GAUGES["node_disk_io_read"],
        value=disk_io_stats[0],
        tags=node_tags,
    )
    disk_write_record = Record(
        gauge=METRICS_GAUGES["node_disk_io_write"],
        value=disk_io_stats[1],
        tags=node_tags,
    )
    disk_read_count_record = Record(
        gauge=METRICS_GAUGES["node_disk_io_read_count"],
        value=disk_io_stats[2],
        tags=node_tags,
    )
    disk_write_count_record = Record(
        gauge=METRICS_GAUGES["node_disk_io_write_count"],
        value=disk_io_stats[3],
        tags=node_tags,
    )
    disk_io_speed_stats = stats["disk_io_speed"]
    disk_read_speed_record = Record(
        gauge=METRICS_GAUGES["node_disk_io_read_speed"],
        value=disk_io_speed_stats[0],
        tags=node_tags,
    )
    disk_write_speed_record = Record(
        gauge=METRICS_GAUGES["node_disk_io_write_speed"],
        value=disk_io_speed_stats[1],
        tags=node_tags,
    )
    disk_read_iops_record = Record(
        gauge=METRICS_GAUGES["node_disk_read_iops"],
        value=disk_io_speed_stats[2],
        tags=node_tags,
    )
    disk_write_iops_record = Record(
        gauge=METRICS_GAUGES["node_disk_write_iops"],
        value=disk_io_speed_stats[3],
        tags=node_tags,
    )
    # Disk usage/utilization is taken from the root ("/") mount only.
    used = stats["disk"]["/"].used
    free = stats["disk"]["/"].free
    disk_utilization = float(used / (used + free)) * 100
    disk_usage_record = Record(
        gauge=METRICS_GAUGES["node_disk_usage"], value=used, tags=node_tags
    )
    disk_free_record = Record(
        gauge=METRICS_GAUGES["node_disk_free"], value=free, tags=node_tags
    )
    disk_utilization_percentage_record = Record(
        gauge=METRICS_GAUGES["node_disk_utilization_percentage"],
        value=disk_utilization,
        tags=node_tags,
    )

    # -- Network speed (send/receive) stats per node --
    network_stats = stats["network"]
    network_sent_record = Record(
        gauge=METRICS_GAUGES["node_network_sent"],
        value=network_stats[0],
        tags=node_tags,
    )
    network_received_record = Record(
        gauge=METRICS_GAUGES["node_network_received"],
        value=network_stats[1],
        tags=node_tags,
    )

    # -- Network speed (send/receive) per node --
    network_speed_stats = stats["network_speed"]
    network_send_speed_record = Record(
        gauge=METRICS_GAUGES["node_network_send_speed"],
        value=network_speed_stats[0],
        tags=node_tags,
    )
    network_receive_speed_record = Record(
        gauge=METRICS_GAUGES["node_network_receive_speed"],
        value=network_speed_stats[1],
        tags=node_tags,
    )

    """
    Record system stats.
    """
    # GCS process stats only exist on the head node.
    if self._is_head_node:
        gcs_stats = stats["gcs"]
        if gcs_stats:
            records_reported.extend(
                self._generate_system_stats_record(
                    [gcs_stats], "gcs", pid=str(gcs_stats["pid"])
                )
            )

    # Record component metrics.
    raylet_stats = stats["raylet"]
    if raylet_stats:
        raylet_pid = str(raylet_stats["pid"])
        records_reported.extend(
            self._generate_system_stats_record(
                [raylet_stats], "raylet", pid=raylet_pid
            )
        )

    workers_stats = stats["workers"]
    records_reported.extend(self.generate_worker_stats_record(workers_stats))

    agent_stats = stats["agent"]
    if agent_stats:
        agent_pid = str(agent_stats["pid"])
        records_reported.extend(
            self._generate_system_stats_record(
                [agent_stats], "agent", pid=agent_pid
            )
        )

    # NOTE: Dashboard metrics is recorded within the dashboard because
    # it can be deployed as a standalone instance. It shouldn't
    # depend on the agent.

    records_reported.extend(
        [
            cpu_record,
            cpu_count_record,
            mem_used_record,
            mem_available_record,
            mem_total_record,
            disk_read_record,
            disk_write_record,
            disk_read_count_record,
            disk_write_count_record,
            disk_read_speed_record,
            disk_write_speed_record,
            disk_read_iops_record,
            disk_write_iops_record,
            disk_usage_record,
            disk_free_record,
            disk_utilization_percentage_record,
            network_sent_record,
            network_received_record,
            network_send_speed_record,
            network_receive_speed_record,
        ]
    )
    return records_reported
async def _run_loop(self):
    """Periodically collect node stats and publish them to GCS.

    Runs forever. Each iteration fetches the autoscaler debug status
    (head node only) and the GCS pid from the internal KV store,
    composes the stats payload inside the thread-pool executor so the
    agent's event loop is never blocked, and publishes the result.
    Any per-iteration error is logged and the loop continues.
    """
    loop = get_or_create_event_loop()
    while True:
        try:
            # Autoscaler debug status is only stored for the head node.
            autoscaler_status_json_bytes: Optional[bytes] = None
            if self._is_head_node:
                autoscaler_status_json_bytes = (
                    await self._gcs_client.async_internal_kv_get(
                        DEBUG_AUTOSCALING_STATUS.encode(),
                        None,
                        timeout=GCS_RPC_TIMEOUT_SECONDS,
                    )
                )

            raw_gcs_pid = await self._gcs_client.async_internal_kv_get(
                GCS_PID_KEY.encode(),
                None,
                timeout=GCS_RPC_TIMEOUT_SECONDS,
            )
            self._gcs_pid = int(raw_gcs_pid.decode()) if raw_gcs_pid else None

            # NOTE: Stats collection is executed inside the thread-pool
            # executor (TPE) to avoid blocking the Agent's event-loop
            json_payload = await loop.run_in_executor(
                self._executor,
                self._run_in_executor,
                autoscaler_status_json_bytes,
            )

            await self._gcs_client.async_publish_node_resource_usage(
                self._key, json_payload
            )
        except Exception:
            logger.exception("Error publishing node physical stats.")
        await asyncio.sleep(reporter_consts.REPORTER_UPDATE_INTERVAL_MS / 1000)
def _run_in_executor(self, cluster_autoscaling_stats_json: Optional[bytes]) -> str:
    """Entry point executed inside the thread-pool executor.

    Drives the async stats-composition coroutine to completion on a
    private event loop (asyncio.run) owned by the worker thread.
    """
    coro = self._async_compose_stats_payload(cluster_autoscaling_stats_json)
    return asyncio.run(coro)
async def _async_compose_stats_payload(
    self, cluster_autoscaling_stats_json: Optional[bytes]
) -> str:
    """Collect stats, export metric records, and return the JSON payload.

    Args:
        cluster_autoscaling_stats_json: raw autoscaler report bytes
            fetched from the internal KV store, or None off the head
            node.

    Returns:
        The serialized stats payload to publish to GCS.
    """
    stats = await self._async_collect_stats()

    # Metric records are exported only when collection is enabled; the
    # JSON payload for GCS is produced either way.
    if not self._metrics_collection_disabled:
        if cluster_autoscaling_stats_json:
            cluster_stats = json.loads(cluster_autoscaling_stats_json.decode())
        else:
            cluster_stats = {}
        records = self._to_records(stats, cluster_stats)
        global_tags = {
            "Version": ray.__version__,
            "SessionName": self._session_name,
        }
        if RAY_ENABLE_OPEN_TELEMETRY:
            self._open_telemetry_metric_recorder.record_and_export(
                records, global_tags=global_tags
            )
        else:
            self._metrics_agent.record_and_export(
                records, global_tags=global_tags
            )
            # Legacy (non-OTel) path: drop metric tags of dead workers.
            self._metrics_agent.clean_all_dead_worker_metrics()

    return self._generate_stats_payload(stats)
def _generate_stats_payload(self, stats: dict) -> str:
    """Serialize the collected stats dict to the JSON payload for GCS."""
    # Backwards-compatibility: processes_pids is exposed as a list of
    # dicts even when it was collected as a dict keyed by pid.
    for gpu in stats["gpus"]:
        pids = gpu.get("processes_pids")
        if isinstance(pids, dict):
            gpu["processes_pids"] = list(pids.values())

    if StatsPayload is None:
        # NOTE: This converts keys to "Google style", (e.g: "processes_pids" -> "processesPids")
        return jsonify_asdict(stats)

    stats_dict = dashboard_utils.to_google_style(recursive_asdict(stats))
    parsed_stats = StatsPayload.parse_obj(stats_dict)
    return json.dumps(parsed_stats.dict())
async def run(self, server):
    """Register gRPC servicers, start GPU metric collection, and loop.

    Args:
        server: the agent's gRPC server, or a falsy value when running
            without one (servicer registration is skipped).
    """
    if server:
        reporter_pb2_grpc.add_ReporterServiceServicer_to_server(self, server)
        if RAY_ENABLE_OPEN_TELEMETRY:
            metrics_service_pb2_grpc.add_MetricsServiceServicer_to_server(
                self, server
            )

    # Initialize GPU metric provider when the agent starts
    self._gpu_metric_provider.initialize()

    await self._run_loop()
@staticmethod
def is_minimal_module():
    """The reporter agent is not part of the minimal Ray installation."""
    return False
| ReporterAgent |
python | keras-team__keras | guides/making_new_layers_and_models_via_subclassing.py | {
"start": 17822,
"end": 18507
} | class ____(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(
self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs
):
super().__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
| Encoder |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0077_remote_repository_data_migration.py | {
"start": 489,
"end": 712
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0076_project_remote_repository"),
]
operations = [
migrations.RunPython(migrate_data),
]
| Migration |
python | astropy__astropy | astropy/visualization/lupton_rgb.py | {
"start": 9537,
"end": 11805
} | class ____(AsinhMapping):
"""
A mapping for an asinh stretch, estimating the linear stretch by zscale.
x = asinh(Q (I - z1)/(z2 - z1))/Q
Parameters
----------
image1 : ndarray or a list of arrays
The image to analyse, or a list of 3 images to be converted to
an intensity image.
image2 : ndarray, optional
the second image to analyse (must be specified with image3).
image3 : ndarray, optional
the third image to analyse (must be specified with image2).
Q : float, optional
The asinh softening parameter. Default is 8.
pedestal : float or sequence(3), optional
The value, or array of 3 values, to subtract from the images; or None.
Notes
-----
pedestal, if not None, is removed from the images when calculating the
zscale stretch, and added back into Mapping.minimum[]
"""
def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
if image2 is None or image3 is None:
if not (image2 is None and image3 is None):
raise ValueError(
"please specify either a single image or three images."
)
image = [image1]
else:
image = [image1, image2, image3]
if pedestal is not None:
try:
len(pedestal)
except TypeError:
pedestal = 3 * [pedestal]
if len(pedestal) != 3:
raise ValueError("please provide 1 or 3 pedestals.")
image = list(image) # needs to be mutable
for i, im in enumerate(image):
if pedestal[i] != 0.0:
image[i] = im - pedestal[i] # n.b. a copy
else:
pedestal = len(image) * [0.0]
image = compute_intensity(*image)
zscale_limits = ZScaleInterval().get_limits(image)
zscale = LinearMapping(*zscale_limits, image=image)
# zscale.minimum is always a triple
stretch = zscale.maximum - zscale.minimum[0]
minimum = zscale.minimum
for i, level in enumerate(pedestal):
minimum[i] += level
AsinhMapping.__init__(self, minimum, stretch, Q)
self._image = image
| AsinhZScaleMapping |
python | eth-brownie__brownie | brownie/test/managers/runner.py | {
"start": 1385,
"end": 3765
} | class ____:
def __init__(
self, revert_msg=None, dev_revert_msg=None, revert_pattern=None, dev_revert_pattern=None
):
if revert_msg is not None and revert_pattern is not None:
raise ValueError("Can only use one of`revert_msg` and `revert_pattern`")
if dev_revert_msg is not None and dev_revert_pattern is not None:
raise ValueError("Can only use one of `dev_revert_msg` and `dev_revert_pattern`")
if revert_pattern:
regex_compile(revert_pattern)
if dev_revert_pattern:
regex_compile(dev_revert_pattern)
self.revert_msg = revert_msg
self.dev_revert_msg = dev_revert_msg
self.revert_pattern = revert_pattern
self.dev_revert_pattern = dev_revert_pattern
self.always_transact = CONFIG.argv["always_transact"]
if revert_msg is not None and (revert_msg.startswith("dev:") or dev_revert_msg):
# run calls as transactinos when catching a dev revert string
CONFIG.argv["always_transact"] = True
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
CONFIG.argv["always_transact"] = self.always_transact
if exc_type is None:
raise AssertionError("Transaction did not revert")
if exc_type is not VirtualMachineError:
raise
message = self.dev_revert_msg
pattern = self.dev_revert_pattern
if message or pattern:
actual = exc_value.dev_revert_msg
if (
actual is None
or (pattern and not regex_fullmatch(pattern, actual))
or (message and message != actual)
):
raise AssertionError(
f"Unexpected dev revert string '{actual}'\n{exc_value.source}"
) from None
message = self.revert_msg
pattern = self.revert_pattern
if message or pattern:
actual = exc_value.revert_msg
if (
actual is None
or (pattern and not regex_fullmatch(pattern, actual))
or (message and message != actual)
):
raise AssertionError(
f"Unexpected revert string '{actual}'\n{exc_value.source}"
) from None
return True
| RevertContextManager |
python | coleifer__peewee | tests/cockroachdb.py | {
"start": 569,
"end": 618
} | class ____(TestModel):
data = TextField()
| Normal |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_data_bar05.py | {
"start": 345,
"end": 7604
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.index = 0
worksheet.conditional_format(
"A1",
{
"type": "data_bar",
"bar_direction": "left",
},
)
worksheet.conditional_format(
"A2:B2",
{
"type": "data_bar",
"bar_color": "#63C384",
"bar_direction": "right",
},
)
worksheet.conditional_format(
"A3:C3",
{
"type": "data_bar",
"bar_color": "#FF555A",
"bar_negative_color": "#FFFF00",
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="min"/>
<cfvo type="max"/>
<color rgb="FF638EC6"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<conditionalFormatting sqref="A2:B2">
<cfRule type="dataBar" priority="2">
<dataBar>
<cfvo type="min"/>
<cfvo type="max"/>
<color rgb="FF63C384"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000002}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<conditionalFormatting sqref="A3:C3">
<cfRule type="dataBar" priority="3">
<dataBar>
<cfvo type="min"/>
<cfvo type="max"/>
<color rgb="FFFF555A"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000003}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
<x14:conditionalFormattings>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
<x14:dataBar minLength="0" maxLength="100" border="1" direction="leftToRight" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="autoMin"/>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF638EC6"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A1</xm:sqref>
</x14:conditionalFormatting>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000002}">
<x14:dataBar minLength="0" maxLength="100" border="1" direction="rightToLeft" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="autoMin"/>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF63C384"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A2:B2</xm:sqref>
</x14:conditionalFormatting>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000003}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="autoMin"/>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FFFF555A"/>
<x14:negativeFillColor rgb="FFFFFF00"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A3:C3</xm:sqref>
</x14:conditionalFormatting>
</x14:conditionalFormattings>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | kubernetes-client__python | kubernetes/client/api/events_api.py | {
"start": 543,
"end": 5185
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| EventsApi |
python | walkccc__LeetCode | solutions/759. Employee Free Time/759.py | {
"start": 0,
"end": 425
} | class ____:
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
ans = []
intervals = []
for s in schedule:
intervals.extend(s)
intervals.sort(key=lambda x: x.start)
prevEnd = intervals[0].end
for interval in intervals:
if interval.start > prevEnd:
ans.append(Interval(prevEnd, interval.start))
prevEnd = max(prevEnd, interval.end)
return ans
| Solution |
python | doocs__leetcode | solution/1200-1299/1222.Queens That Can Attack the King/Solution.py | {
"start": 0,
"end": 567
} | class ____:
def queensAttacktheKing(
self, queens: List[List[int]], king: List[int]
) -> List[List[int]]:
n = 8
s = {(i, j) for i, j in queens}
ans = []
for a in range(-1, 2):
for b in range(-1, 2):
if a or b:
x, y = king
while 0 <= x + a < n and 0 <= y + b < n:
x, y = x + a, y + b
if (x, y) in s:
ans.append([x, y])
break
return ans
| Solution |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 44981,
"end": 50949
} | class ____:
async def test_flows_fail_with_timeout(self):
@flow(timeout_seconds=0.1)
def my_flow():
time.sleep(SLEEP_TIME)
state = my_flow(return_state=True)
assert state.is_failed()
assert state.name == "TimedOut"
with pytest.raises(TimeoutError):
await state.result()
assert "exceeded timeout of 0.1 second(s)" in state.message
async def test_async_flows_fail_with_timeout(self):
@flow(timeout_seconds=0.1)
async def my_flow():
await anyio.sleep(SLEEP_TIME)
state = await my_flow(return_state=True)
assert state.is_failed()
assert state.name == "TimedOut"
with pytest.raises(TimeoutError):
await state.result()
assert "exceeded timeout of 0.1 second(s)" in state.message
async def test_timeout_only_applies_if_exceeded(self):
@flow(timeout_seconds=10)
def my_flow():
time.sleep(0.1)
state = my_flow(return_state=True)
assert state.is_completed()
@pytest.mark.skip(reason="Fails with new engine, passed on old engine")
async def test_user_timeout_is_not_hidden(self):
@flow(timeout_seconds=30)
def my_flow():
raise TimeoutError("Oh no!")
state = my_flow(return_state=True)
assert state.is_failed()
with pytest.raises(TimeoutError, match="Oh no!"):
await state.result()
assert "exceeded timeout" not in state.message
@pytest.mark.timeout(method="thread") # alarm-based pytest-timeout will interfere
def test_timeout_does_not_wait_for_completion_for_sync_flows(self, tmp_path):
completed = False
@flow(timeout_seconds=0.1)
def my_flow():
time.sleep(SLEEP_TIME)
nonlocal completed
completed = True
state = my_flow(return_state=True)
assert state.is_failed()
assert "exceeded timeout of 0.1 second(s)" in state.message
assert not completed
def test_timeout_stops_execution_at_next_task_for_sync_flows(self, tmp_path):
"""
Sync flow runs tasks will fail after a timeout which will cause the flow to exit
"""
completed = False
task_completed = False
@task
def my_task():
nonlocal task_completed
task_completed = True
@flow(timeout_seconds=0.1)
def my_flow():
time.sleep(SLEEP_TIME)
my_task()
nonlocal completed
completed = True
state = my_flow(return_state=True)
assert state.is_failed()
assert "exceeded timeout of 0.1 second(s)" in state.message
assert not completed
assert not task_completed
async def test_timeout_stops_execution_after_await_for_async_flows(self, tmp_path):
"""
Async flow runs can be cancelled after a timeout
"""
completed = False
@flow(timeout_seconds=0.1)
async def my_flow():
# Sleep in intervals to give more chances for interrupt
for _ in range(100):
await anyio.sleep(0.1)
nonlocal completed
completed = True
state = await my_flow(return_state=True)
assert state.is_failed()
assert "exceeded timeout of 0.1 second(s)" in state.message
assert not completed
async def test_timeout_stops_execution_in_async_subflows(self, tmp_path):
"""
Async flow runs can be cancelled after a timeout
"""
completed = False
@flow(timeout_seconds=0.1)
async def my_subflow():
# Sleep in intervals to give more chances for interrupt
for _ in range(SLEEP_TIME * 10):
await anyio.sleep(0.1)
nonlocal completed
completed = True
@flow
async def my_flow():
subflow_state = await my_subflow(return_state=True)
return None, subflow_state
state = await my_flow(return_state=True)
(_, subflow_state) = await state.result()
assert "exceeded timeout of 0.1 second(s)" in subflow_state.message
assert not completed
async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path):
"""
Sync flow runs can be cancelled after a timeout once a task is called
"""
completed = False
@task
def timeout_noticing_task():
pass
@flow(timeout_seconds=0.1)
def my_subflow():
start = time.monotonic()
while time.monotonic() - start < 0.5:
pass
timeout_noticing_task()
nonlocal completed
completed = True
@flow
def my_flow():
subflow_state = my_subflow(return_state=True)
return None, subflow_state
state = my_flow(return_state=True)
(_, subflow_state) = await state.result()
assert "exceeded timeout of 0.1 second(s)" in subflow_state.message
assert not completed
async def test_subflow_timeout_waits_until_execution_starts(self, tmp_path):
"""
Subflow with a timeout shouldn't start their timeout before the subflow is started.
Fixes: https://github.com/PrefectHQ/prefect/issues/7903.
"""
completed = False
@flow(timeout_seconds=1)
async def downstream_flow():
nonlocal completed
completed = True
@task
async def sleep_task(n):
await anyio.sleep(n)
@flow
async def my_flow():
upstream_sleepers = sleep_task.map([0.5, 1.0])
await downstream_flow(wait_for=upstream_sleepers)
state = await my_flow(return_state=True)
assert state.is_completed()
# Validate the sleep tasks have ran
assert completed
| TestFlowTimeouts |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 21920,
"end": 22152
} | class ____(_DgConfigErrorRecord):
parent_key: str
key: str
@property
def message(self) -> str:
return f"Unrecognized field at `{self.parent_key}`:\n {self.key}"
@record
| _DgConfigUnrecognizedFieldErrorRecord |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 262835,
"end": 263121
} | class ____(sgqlc.types.Input):
"""Ways in which to filter lists of projects."""
__schema__ = github_schema
__field_names__ = ("state",)
state = sgqlc.types.Field(ProjectV2State, graphql_name="state")
"""List project v2 filtered by the state given."""
| ProjectV2Filters |
python | django__django | tests/generic_views/views.py | {
"start": 6049,
"end": 6231
} | class ____(BookDetail):
def get_object(self, queryset=None):
return super().get_object(queryset=Book.objects.filter(pk=self.kwargs["pk"]))
| BookDetailGetObjectCustomQueryset |
python | huggingface__transformers | src/transformers/models/falcon_mamba/modular_falcon_mamba.py | {
"start": 25909,
"end": 25958
} | class ____(MambaOutput):
pass
| FalconMambaOutput |
python | scrapy__scrapy | scrapy/contracts/default.py | {
"start": 320,
"end": 593
} | class ____(Contract):
"""Contract to set the url of the request (mandatory)
@url http://scrapy.org
"""
name = "url"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["url"] = self.args[0]
return args
| UrlContract |
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 1594,
"end": 2813
} | class ____(types.StructRef):
"""Test associated with this type represent the higher-level uses of
structef.
"""
pass
# Call to define_proxy is needed to register the use of `MyStruct` as a
# PyObject proxy for creating a Numba-allocated structref.
# The `MyStruct` class can then be used in both jit-code and interpreted-code.
structref.define_proxy(
MyStruct,
MyStructType,
['values', 'counter'],
)
@njit
def my_struct(values, counter):
st = structref.new(my_struct_ty)
my_struct_init(st, values, counter)
return st
@njit
def my_struct_init(self, values, counter):
self.values = values
self.counter = counter
@njit
def ctor_by_intrinsic(vs, ctr):
st = my_struct(vs, counter=ctr)
st.values += st.values
st.counter *= ctr
return st
@njit
def ctor_by_class(vs, ctr):
return MyStruct(values=vs, counter=ctr)
@njit
def get_values(st):
return st.values
@njit
def set_values(st, val):
st.values = val
@njit
def get_counter(st):
return st.counter
@njit
def compute_fields(st):
return st.values + st.counter
@njit
def test_structref_is():
c = MyStruct(3, 4)
d = MyStruct(3, 4)
return (c is c, c is d)
| MyStructType |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operators/dep_operators.py | {
"start": 1182,
"end": 3441
} | class ____(
BuiltinAutomationCondition[T_EntityKey], Generic[T_EntityKey, U_EntityKey]
):
key: U_EntityKey
operand: AutomationCondition[U_EntityKey]
@property
def name(self) -> str:
return self.key.to_user_string()
@property
def children(self) -> Sequence[AutomationCondition]:
return [self.operand]
async def evaluate( # pyright: ignore[reportIncompatibleMethodOverride]
self, context: AutomationContext[T_EntityKey]
) -> AutomationResult[T_EntityKey]:
# if the key we're mapping to is a child of the key we're mapping from and is not
# self-dependent, use the downstream mapping function, otherwise use upstream
if (
self.key in context.asset_graph.get(context.key).child_entity_keys
and self.key != context.key
):
directions = ("down", "up")
else:
directions = ("up", "down")
to_candidate_subset = context.candidate_subset.compute_mapped_subset(
self.key, direction=directions[0]
)
to_context = context.for_child_condition(
child_condition=self.operand,
child_indices=[0],
candidate_subset=to_candidate_subset,
)
to_result = await to_context.evaluate_async()
true_subset = to_result.true_subset.compute_mapped_subset(
context.key, direction=directions[1]
)
return AutomationResult(context=context, true_subset=true_subset, child_results=[to_result])
@public
def replace(
self, old: Union[AutomationCondition, str], new: T_AutomationCondition
) -> Union[Self, T_AutomationCondition]:
"""Replaces all instances of ``old`` across any sub-conditions with ``new``.
If ``old`` is a string, then conditions with a label or name matching
that string will be replaced.
Args:
old (Union[AutomationCondition, str]): The condition to replace.
new (AutomationCondition): The condition to replace with.
"""
return (
new
if old in [self, self.name, self.get_label()]
else copy(self, operand=self.operand.replace(old, new))
)
@record
| EntityMatchesCondition |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/aggregate.py | {
"start": 5585,
"end": 6586
} | class ____(GenericBase):
@staticmethod
def visit_eq(expression: Expression, value: str) -> Condition:
return contains(StringArray.visit_eq(expression, value))
@staticmethod
def visit_neq(expression: Expression, value: str) -> Condition:
return does_not_contain(StringArray.visit_eq(expression, value))
@staticmethod
def visit_match(expression: Expression, value: str) -> Condition:
return contains(StringArray.visit_match(expression, value))
@staticmethod
def visit_not_match(expression: Expression, value: str) -> Condition:
return does_not_contain(StringArray.visit_match(expression, value))
@staticmethod
def visit_in(expression: Expression, value: list[str]) -> Condition:
return contains(StringArray.visit_in(expression, value))
@staticmethod
def visit_not_in(expression: Expression, value: list[str]) -> Condition:
return does_not_contain(StringArray.visit_in(expression, value))
| SumOfStringArray |
python | RaRe-Technologies__gensim | gensim/models/phrases.py | {
"start": 17014,
"end": 31338
} | class ____(_PhrasesTransformation):
"""Detect phrases based on collocation counts."""
def __init__(
self, sentences=None, min_count=5, threshold=10.0,
max_vocab_size=40000000, delimiter='_', progress_per=10000,
scoring='default', connector_words=frozenset(),
):
"""
Parameters
----------
sentences : iterable of list of str, optional
The `sentences` iterable can be simply a list, but for larger corpora, consider a generator that streams
the sentences directly from disk/network, See :class:`~gensim.models.word2vec.BrownCorpus`,
:class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence`
for such examples.
min_count : float, optional
Ignore all words and bigrams with total collected count lower than this value.
threshold : float, optional
Represent a score threshold for forming the phrases (higher means fewer phrases).
A phrase of words `a` followed by `b` is accepted if the score of the phrase is greater than threshold.
Heavily depends on concrete scoring-function, see the `scoring` parameter.
max_vocab_size : int, optional
Maximum size (number of tokens) of the vocabulary. Used to control pruning of less common words,
to keep memory under control. The default of 40M needs about 3.6GB of RAM. Increase/decrease
`max_vocab_size` depending on how much available memory you have.
delimiter : str, optional
Glue character used to join collocation tokens.
scoring : {'default', 'npmi', function}, optional
Specify how potential phrases are scored. `scoring` can be set with either a string that refers to a
built-in scoring function, or with a function with the expected parameter names.
Two built-in scoring functions are available by setting `scoring` to a string:
#. "default" - :func:`~gensim.models.phrases.original_scorer`.
#. "npmi" - :func:`~gensim.models.phrases.npmi_scorer`.
connector_words : set of str, optional
Set of words that may be included within a phrase, without affecting its scoring.
No phrase can start nor end with a connector word; a phrase may contain any number of
connector words in the middle.
**If your texts are in English, set** ``connector_words=phrases.ENGLISH_CONNECTOR_WORDS``.
This will cause phrases to include common English articles, prepositions and
conjuctions, such as `bank_of_america` or `eye_of_the_beholder`.
For other languages or specific applications domains, use custom ``connector_words``
that make sense there: ``connector_words=frozenset("der die das".split())`` etc.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Load corpus and train a model.
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, min_count=1, threshold=1, connector_words=ENGLISH_CONNECTOR_WORDS)
>>>
>>> # Use the model to detect phrases in a new sentence.
>>> sent = [u'trees', u'graph', u'minors']
>>> print(phrases[sent])
[u'trees_graph', u'minors']
>>>
>>> # Or transform multiple sentences at once.
>>> sents = [[u'trees', u'graph', u'minors'], [u'graph', u'minors']]
>>> for phrase in phrases[sents]:
... print(phrase)
[u'trees_graph', u'minors']
[u'graph_minors']
>>>
>>> # Export a FrozenPhrases object that is more efficient but doesn't allow any more training.
>>> frozen_phrases = phrases.freeze()
>>> print(frozen_phrases[sent])
[u'trees_graph', u'minors']
Notes
-----
The ``scoring="npmi"`` is more robust when dealing with common words that form part of common bigrams, and
ranges from -1 to 1, but is slower to calculate than the default ``scoring="default"``.
The default is the PMI-like scoring as described in `Mikolov, et. al: "Distributed
Representations of Words and Phrases and their Compositionality" <https://arxiv.org/abs/1310.4546>`_.
To use your own custom ``scoring`` function, pass in a function with the following signature:
* ``worda_count`` - number of corpus occurrences in `sentences` of the first token in the bigram being scored
* ``wordb_count`` - number of corpus occurrences in `sentences` of the second token in the bigram being scored
* ``bigram_count`` - number of occurrences in `sentences` of the whole bigram
* ``len_vocab`` - the number of unique tokens in `sentences`
* ``min_count`` - the `min_count` setting of the Phrases class
* ``corpus_word_count`` - the total number of tokens (non-unique) in `sentences`
The scoring function must accept all these parameters, even if it doesn't use them in its scoring.
The scoring function **must be pickleable**.
"""
super().__init__(connector_words=connector_words)
if min_count <= 0:
raise ValueError("min_count should be at least 1")
if threshold <= 0 and scoring == 'default':
raise ValueError("threshold should be positive for default scoring")
if scoring == 'npmi' and (threshold < -1 or threshold > 1):
raise ValueError("threshold should be between -1 and 1 for npmi scoring")
# Set scoring based on string.
# Intentially override the value of the scoring parameter rather than set self.scoring here,
# to still run the check of scoring function parameters in the next code block.
if isinstance(scoring, str):
if scoring == 'default':
scoring = original_scorer
elif scoring == 'npmi':
scoring = npmi_scorer
else:
raise ValueError(f'unknown scoring method string {scoring} specified')
scoring_params = [
'worda_count', 'wordb_count', 'bigram_count', 'len_vocab', 'min_count', 'corpus_word_count',
]
if callable(scoring):
missing = [param for param in scoring_params if param not in getargspec(scoring)[0]]
if not missing:
self.scoring = scoring
else:
raise ValueError(f'scoring function missing expected parameters {missing}')
self.min_count = min_count
self.threshold = threshold
self.max_vocab_size = max_vocab_size
self.vocab = {} # mapping between token => its count
self.min_reduce = 1 # ignore any tokens with count smaller than this
self.delimiter = delimiter
self.progress_per = progress_per
self.corpus_word_count = 0
# Ensure picklability of the scorer.
try:
pickle.loads(pickle.dumps(self.scoring))
except pickle.PickleError:
raise pickle.PickleError(f'Custom scoring function in {self.__class__.__name__} must be pickle-able')
if sentences is not None:
start = time.time()
self.add_vocab(sentences)
self.add_lifecycle_event("created", msg=f"built {self} in {time.time() - start:.2f}s")
def __str__(self):
return "%s<%i vocab, min_count=%s, threshold=%s, max_vocab_size=%s>" % (
self.__class__.__name__, len(self.vocab), self.min_count,
self.threshold, self.max_vocab_size,
)
@staticmethod
def _learn_vocab(sentences, max_vocab_size, delimiter, connector_words, progress_per):
"""Collect unigram and bigram counts from the `sentences` iterable."""
sentence_no, total_words, min_reduce = -1, 0, 1
vocab = {}
logger.info("collecting all words and their counts")
for sentence_no, sentence in enumerate(sentences):
if sentence_no % progress_per == 0:
logger.info(
"PROGRESS: at sentence #%i, processed %i words and %i word types",
sentence_no, total_words, len(vocab),
)
start_token, in_between = None, []
for word in sentence:
if word not in connector_words:
vocab[word] = vocab.get(word, 0) + 1
if start_token is not None:
phrase_tokens = itertools.chain([start_token], in_between, [word])
joined_phrase_token = delimiter.join(phrase_tokens)
vocab[joined_phrase_token] = vocab.get(joined_phrase_token, 0) + 1
start_token, in_between = word, [] # treat word as both end of a phrase AND beginning of another
elif start_token is not None:
in_between.append(word)
total_words += 1
if len(vocab) > max_vocab_size:
utils.prune_vocab(vocab, min_reduce)
min_reduce += 1
logger.info(
"collected %i token types (unigram + bigrams) from a corpus of %i words and %i sentences",
len(vocab), total_words, sentence_no + 1,
)
return min_reduce, vocab, total_words
def add_vocab(self, sentences):
"""Update model parameters with new `sentences`.
Parameters
----------
sentences : iterable of list of str
Text corpus to update this model's parameters from.
Example
-------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.word2vec import Text8Corpus
>>> from gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS
>>>
>>> # Train a phrase detector from a text corpus.
>>> sentences = Text8Corpus(datapath('testcorpus.txt'))
>>> phrases = Phrases(sentences, connector_words=ENGLISH_CONNECTOR_WORDS) # train model
>>> assert len(phrases.vocab) == 37
>>>
>>> more_sentences = [
... [u'the', u'mayor', u'of', u'new', u'york', u'was', u'there'],
... [u'machine', u'learning', u'can', u'be', u'new', u'york', u'sometimes'],
... ]
>>>
>>> phrases.add_vocab(more_sentences) # add new sentences to model
>>> assert len(phrases.vocab) == 60
"""
# Uses a separate vocab to collect the token counts from `sentences`.
# This consumes more RAM than merging new sentences into `self.vocab`
# directly, but gives the new sentences a fighting chance to collect
# sufficient counts, before being pruned out by the (large) accumulated
# counts collected in previous learn_vocab runs.
min_reduce, vocab, total_words = self._learn_vocab(
sentences, max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
progress_per=self.progress_per, connector_words=self.connector_words,
)
self.corpus_word_count += total_words
if self.vocab:
logger.info("merging %i counts into %s", len(vocab), self)
self.min_reduce = max(self.min_reduce, min_reduce)
for word, count in vocab.items():
self.vocab[word] = self.vocab.get(word, 0) + count
if len(self.vocab) > self.max_vocab_size:
utils.prune_vocab(self.vocab, self.min_reduce)
self.min_reduce += 1
else:
# Optimization for a common case: the current vocab is empty, so apply
# the new vocab directly, no need to double it in memory.
self.vocab = vocab
logger.info("merged %s", self)
def score_candidate(self, word_a, word_b, in_between):
# Micro optimization: check for quick early-out conditions, before the actual scoring.
word_a_cnt = self.vocab.get(word_a, 0)
if word_a_cnt <= 0:
return None, None
word_b_cnt = self.vocab.get(word_b, 0)
if word_b_cnt <= 0:
return None, None
phrase = self.delimiter.join([word_a] + in_between + [word_b])
# XXX: Why do we care about *all* phrase tokens? Why not just score the start+end bigram?
phrase_cnt = self.vocab.get(phrase, 0)
if phrase_cnt <= 0:
return None, None
score = self.scoring(
worda_count=word_a_cnt, wordb_count=word_b_cnt, bigram_count=phrase_cnt,
len_vocab=len(self.vocab), min_count=self.min_count, corpus_word_count=self.corpus_word_count,
)
if score <= self.threshold:
return None, None
return phrase, score
def freeze(self):
"""
Return an object that contains the bare minimum of information while still allowing
phrase detection. See :class:`~gensim.models.phrases.FrozenPhrases`.
Use this "frozen model" to dramatically reduce RAM footprint if you don't plan to
make any further changes to your `Phrases` model.
Returns
-------
:class:`~gensim.models.phrases.FrozenPhrases`
Exported object that's smaller, faster, but doesn't support model updates.
"""
return FrozenPhrases(self)
def export_phrases(self):
"""Extract all found phrases.
Returns
------
dict(str, float)
Mapping between phrases and their scores.
"""
result, source_vocab = {}, self.vocab
for token in source_vocab:
unigrams = token.split(self.delimiter)
if len(unigrams) < 2:
continue # no phrases here
phrase, score = self.score_candidate(unigrams[0], unigrams[-1], unigrams[1:-1])
if score is not None:
result[phrase] = score
return result
| Phrases |
python | django__django | django/contrib/gis/geos/io.py | {
"start": 640,
"end": 799
} | class ____(_WKTReader):
def read(self, wkt):
"Return a GEOSGeometry for the given WKT string."
return GEOSGeometry(super().read(wkt))
| WKTReader |
python | huggingface__transformers | src/transformers/models/musicgen/modeling_musicgen.py | {
"start": 59560,
"end": 113330
} | class ____(MusicgenPreTrainedModel, GenerationMixin):
config: MusicgenConfig
output_modalities = ("audio",)
base_model_prefix = "encoder_decoder"
main_input_name = "input_ids"
supports_gradient_checkpointing = True
def __init__(
self,
config: Optional[MusicgenConfig] = None,
text_encoder: Optional[PreTrainedModel] = None,
audio_encoder: Optional[PreTrainedModel] = None,
decoder: Optional[MusicgenForCausalLM] = None,
):
r"""
text_encoder (`PreTrainedModel`, *optional*):
The text encoder model that encodes text into hidden states for conditioning.
audio_encoder (`PreTrainedModel`, *optional*):
The audio encoder model that encodes audio into hidden states for conditioning.
decoder (`MusicgenForCausalLM`, *optional*):
The decoder model that generates audio tokens based on conditioning signals.
"""
if config is None and (text_encoder is None or audio_encoder is None or decoder is None):
raise ValueError(
"Either a configuration has to be provided, or all three of text encoder, audio encoder and MusicGen decoder."
)
if config is None:
config = MusicgenConfig(
text_encoder=text_encoder.config, audio_encoder=audio_encoder.config, decoder=decoder.config
)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"Config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.text_encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the MusicGen decoder's configuration, it has to be equal"
f" to the text encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
f" `config.decoder.cross_attention_hidden_size` and {config.text_encoder.hidden_size} for"
" `config.text_encoder.hidden_size`."
)
# initialize with config
super().__init__(config)
if text_encoder is None:
from ..auto.modeling_auto import AutoModelForTextEncoding
text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder)
if audio_encoder is None:
from ..auto.modeling_auto import AutoModel
audio_encoder = AutoModel.from_config(config.audio_encoder)
if decoder is None:
decoder = MusicgenForCausalLM._from_config(config.decoder)
self.text_encoder = text_encoder
self.audio_encoder = audio_encoder
self.decoder = decoder
if self.text_encoder.config.to_dict() != self.config.text_encoder.to_dict():
logger.warning(
f"Config of the text_encoder: {self.text_encoder.__class__} is overwritten by shared text_encoder config:"
f" {self.config.text_encoder}"
)
if self.audio_encoder.config.to_dict() != self.config.audio_encoder.to_dict():
logger.warning(
f"Config of the audio_encoder: {self.audio_encoder.__class__} is overwritten by shared audio_encoder config:"
f" {self.config.audio_encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
f" {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.config.text_encoder._attn_implementation = self.text_encoder.config._attn_implementation
self.config.audio_encoder._attn_implementation = self.audio_encoder.config._attn_implementation
self.config.decoder._attn_implementation = self.decoder.config._attn_implementation
self.text_encoder.config = self.config.text_encoder
self.audio_encoder.config = self.config.audio_encoder
self.decoder.config = self.config.decoder
# text encoder outputs might need to be projected to different dimension for decoder
if (
self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size)
if self.text_encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.text_encoder} should not have a LM Head. Please use a model without and LM Head"
)
decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
if "encoder_hidden_states" not in decoder_signature:
raise ValueError(
"The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
"following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
)
# tie text encoder, decoder weights if config set accordingly
self.post_init()
def get_input_embeddings(self):
return self.text_encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_sub_models_pretrained(
cls,
text_encoder_pretrained_model_name_or_path: Optional[str] = None,
audio_encoder_pretrained_model_name_or_path: Optional[str] = None,
decoder_pretrained_model_name_or_path: Optional[str] = None,
*model_args,
**kwargs,
) -> PreTrainedModel:
r"""
Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the
library from pretrained model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
text_encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the text encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
audio_encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the audio encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the text encoder configuration, use the prefix *text_encoder_* for each configuration
parameter.
- To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration
parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import MusicgenForConditionalGeneration
>>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder
>>> model = MusicgenForConditionalGeneration.from_sub_models_pretrained(
... text_encoder_pretrained_model_name_or_path="google-t5/t5-base",
... audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz",
... decoder_pretrained_model_name_or_path="facebook/musicgen-small",
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./musicgen-ft")
>>> # load fine-tuned model
>>> model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-ft")
```"""
kwargs_text_encoder = {
argument[len("text_encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("text_encoder_")
}
kwargs_audio_encoder = {
argument[len("audio_encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("audio_encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove text encoder, audio encoder and decoder kwargs from kwargs
for key in kwargs_text_encoder:
del kwargs["text_encoder_" + key]
for key in kwargs_audio_encoder:
del kwargs["audio_encoder_" + key]
for key in kwargs_decoder:
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
text_encoder = kwargs_text_encoder.pop("model", None)
if text_encoder is None:
if text_encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_text_encoder:
encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(
text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True
)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model "
"from a decoder model. Cross-attention and causal mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_text_encoder["config"] = encoder_config
text_encoder = AutoModel.from_pretrained(
text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder
)
audio_encoder = kwargs_audio_encoder.pop("model", None)
if audio_encoder is None:
if audio_encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_audio_encoder:
encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(
audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True
)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model "
"from a decoder model. Cross-attention and causal mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_audio_encoder["config"] = encoder_config
audio_encoder = AutoModel.from_pretrained(
audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder
)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
)
if isinstance(decoder_config, MusicgenConfig):
decoder_config = decoder_config.decoder
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_sub_models_pretrained(...)`"
)
decoder = MusicgenForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = MusicgenConfig(
text_encoder=text_encoder.config, audio_encoder=audio_encoder.config, decoder=decoder.config, **kwargs
)
return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config)
@auto_docstring
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.BoolTensor] = None,
    input_values: Optional[torch.FloatTensor] = None,
    padding_mask: Optional[torch.BoolTensor] = None,
    decoder_input_ids: Optional[torch.LongTensor] = None,
    decoder_attention_mask: Optional[torch.BoolTensor] = None,
    encoder_outputs: Optional[tuple[torch.FloatTensor]] = None,
    past_key_values: Optional[Cache] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,
) -> Union[tuple, Seq2SeqLMOutput]:
    r"""
    padding_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

        - 1 for tokens that are **not masked**,
        - 0 for tokens that are **masked**.

        [What are attention masks?](../glossary#attention-mask)
    decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*):
        Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.

        Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
        such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.

        [What are decoder input IDs?](../glossary#decoder-input-ids)

        <Tip warning={true}>

        The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
        target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
        you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
        frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
        target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
        `decoder_input_ids`.

        </Tip>
    decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
        Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
        be used by default.
    labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
        Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
        `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
        are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`

    Examples:
    ```python
    >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration
    >>> import torch

    >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
    >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

    >>> inputs = processor(
    ...     text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
    ...     padding=True,
    ...     return_tensors="pt",
    ... )

    >>> pad_token_id = model.generation_config.pad_token_id
    >>> decoder_input_ids = (
    ...     torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long)
    ...     * pad_token_id
    ... )

    >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits
    >>> logits.shape  # (bsz * num_codebooks, tgt_len, vocab_size)
    torch.Size([8, 1, 2048])
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Split the catch-all **kwargs into per-sub-model kwargs by stripping the prefix.
    # BUGFIX: the prefix must be sliced off with `[len(prefix) :]`; the original code
    # used `[len(prefix)]` for the text/audio encoders, which indexed a single
    # character of the argument name instead of removing the prefix. The decoder
    # branch below was already correct and is the pattern followed here.
    kwargs_text_encoder = {
        argument[len("text_encoder_") :]: value
        for argument, value in kwargs.items()
        if argument.startswith("text_encoder_")
    }

    kwargs_audio_encoder = {
        argument[len("audio_encoder_") :]: value
        for argument, value in kwargs.items()
        if argument.startswith("audio_encoder_")
    }

    kwargs_decoder = {
        argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
    }

    # Encode the text prompt, unless precomputed encoder outputs were supplied.
    if encoder_outputs is None:
        encoder_outputs = self.text_encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs_text_encoder,
        )
    elif isinstance(encoder_outputs, tuple):
        encoder_outputs = BaseModelOutput(*encoder_outputs)

    encoder_hidden_states = encoder_outputs[0]

    # optionally project encoder_hidden_states to match the decoder's hidden size
    if (
        self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
        and self.decoder.config.cross_attention_hidden_size is None
    ):
        encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

    # Zero out hidden states at padded prompt positions.
    if attention_mask is not None:
        encoder_hidden_states = encoder_hidden_states * attention_mask[..., None]

    if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
        # Teacher forcing: build decoder inputs by right-shifting the labels.
        decoder_input_ids = shift_tokens_right(
            labels, self.config.decoder.pad_token_id, self.config.decoder.decoder_start_token_id
        )
    elif decoder_input_ids is None and decoder_inputs_embeds is None:
        # No decoder inputs given: derive them by encoding the audio prompt.
        audio_encoder_outputs = self.audio_encoder(
            input_values=input_values,
            padding_mask=padding_mask,
            **kwargs_audio_encoder,
        )
        audio_codes = audio_encoder_outputs.audio_codes
        frames, bsz, codebooks, seq_len = audio_codes.shape
        if frames != 1:
            raise ValueError(
                f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
                "disabled by setting `chunk_length=None` in the audio encoder."
            )

        if self.config.decoder.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2:
            # mono input through encodec that we convert to stereo
            audio_codes = audio_codes.repeat_interleave(2, dim=2)

        decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)

    # Decode
    decoder_outputs = self.decoder(
        input_ids=decoder_input_ids,
        attention_mask=decoder_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=attention_mask,
        inputs_embeds=decoder_inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        use_cache=use_cache,
        past_key_values=past_key_values,
        return_dict=return_dict,
        labels=labels,
        **kwargs_decoder,
    )

    if not return_dict:
        return decoder_outputs + encoder_outputs

    return Seq2SeqLMOutput(
        loss=decoder_outputs.loss,
        logits=decoder_outputs.logits,
        past_key_values=decoder_outputs.past_key_values,
        decoder_hidden_states=decoder_outputs.hidden_states,
        decoder_attentions=decoder_outputs.attentions,
        cross_attentions=decoder_outputs.cross_attentions,
        encoder_last_hidden_state=encoder_outputs.last_hidden_state,
        encoder_hidden_states=encoder_outputs.hidden_states,
        encoder_attentions=encoder_outputs.attentions,
    )
def prepare_inputs_for_generation(
    self,
    decoder_input_ids,
    past_key_values=None,
    attention_mask=None,
    decoder_attention_mask=None,
    use_cache=None,
    decoder_delay_pattern_mask=None,
    encoder_outputs=None,
    guidance_scale=None,
    cache_position=None,
    **kwargs,
):
    """Assemble the keyword arguments for one `forward` step during generation.

    MusicGen-specific: (re)builds and applies the codebook delay-pattern mask to
    `decoder_input_ids`, duplicates decoder inputs along the batch dimension for
    classifier-free guidance, and trims the decoder inputs to the uncached
    positions when a KV cache is in use.
    """
    # Overwritten -- MusicGen has custom processing
    if decoder_delay_pattern_mask is None:
        # First step: no mask was stashed yet, so build it from scratch.
        decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            decoder_input_ids,
            self.generation_config.pad_token_id,
            max_length=self.generation_config.max_length,
        )

    # apply the delay pattern mask
    decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask)

    if guidance_scale is not None and guidance_scale > 1:
        # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these
        # before sampling)
        decoder_input_ids = decoder_input_ids.repeat((2, 1))
        if decoder_attention_mask is not None:
            decoder_attention_mask = decoder_attention_mask.repeat((2, 1))

    if past_key_values is not None:
        # With a cache, only the not-yet-processed tokens should be fed to the decoder.
        if cache_position[-1] >= decoder_input_ids.shape[1]:
            # Exception 1: inputs are shorter than the cache positions (e.g. after
            # applying the delay mask) -- keep the trailing window.
            decoder_input_ids = decoder_input_ids[:, -cache_position.shape[0] :]
        elif (
            decoder_input_ids.shape[1] != cache_position.shape[0]
        ):  # Default case (the "else", a no op, is Exception 2)
            decoder_input_ids = decoder_input_ids[:, cache_position]
        else:
            # Default to old behavior: keep only final ID
            decoder_input_ids = decoder_input_ids[:, -1:]

    return {
        "input_ids": None,  # encoder_outputs is defined. input_ids not needed
        "encoder_outputs": encoder_outputs,
        "past_key_values": past_key_values,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "use_cache": use_cache,
    }
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
model_input_name: str,
model_kwargs: dict[str, torch.Tensor],
decoder_start_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None,
device: Optional[torch.device] = None,
) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]:
"""Prepares `decoder_input_ids` for generation with encoder-decoder models"""
# 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming,
# we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
decoder_input_ids = model_kwargs.pop("decoder_input_ids")
elif "input_ids" in model_kwargs and model_input_name != "input_ids":
decoder_input_ids = model_kwargs.pop("input_ids")
else:
decoder_input_ids = None
# 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
if device is None:
device = self.device
decoder_input_ids_start = (
torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device)
* decoder_start_token_id
)
# no user input -> use decoder_start_token_id as decoder_input_ids
if decoder_input_ids is None:
decoder_input_ids = decoder_input_ids_start
# user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
# decoder_attention_mask if provided)
elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item():
decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1)
if "decoder_attention_mask" in model_kwargs:
decoder_attention_mask = model_kwargs["decoder_attention_mask"]
decoder_attention_mask = torch.cat(
(torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
dim=-1,
)
model_kwargs["decoder_attention_mask"] = decoder_attention_mask
return decoder_input_ids, model_kwargs
def _prepare_text_encoder_kwargs_for_generation(
    self,
    inputs_tensor: torch.Tensor,
    model_kwargs,
    model_input_name: Optional[str],
    generation_config: GenerationConfig,
) -> dict[str, Any]:
    """Run the text encoder once and stash its output in `model_kwargs["encoder_outputs"]`.

    For classifier-free guidance (guidance_scale > 1) the encoder hidden states
    and attention mask are doubled along the batch dimension with an all-zero
    "null" half.
    """
    # 1. get text encoder
    encoder = self.get_encoder()
    # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device
    # as the inputs.
    if hasattr(encoder, "_hf_hook"):
        encoder._hf_hook.io_same_device = True

    # 2. Prepare encoder args and encoder kwargs from model kwargs.
    irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
    encoder_kwargs = {
        argument: value
        for argument, value in model_kwargs.items()
        if not any(argument.startswith(p) for p in irrelevant_prefix)
    }
    encoder_signature = set(inspect.signature(encoder.forward).parameters)
    encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
    if not encoder_accepts_wildcard:
        # Encoder takes no **kwargs: drop anything its forward doesn't accept.
        encoder_kwargs = {
            argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
        }
    encoder_kwargs["output_attentions"] = generation_config.output_attentions
    encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states
    guidance_scale = generation_config.guidance_scale

    # 3. make sure that encoder returns `ModelOutput`
    model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name
    encoder_kwargs["return_dict"] = True
    encoder_kwargs[model_input_name] = inputs_tensor
    last_hidden_state = encoder(**encoder_kwargs).last_hidden_state

    # for classifier free guidance we need to add a 'null' input to our encoder hidden states
    if guidance_scale is not None and guidance_scale > 1:
        last_hidden_state = torch.concatenate([last_hidden_state, torch.zeros_like(last_hidden_state)], dim=0)
        if "attention_mask" in model_kwargs:
            model_kwargs["attention_mask"] = torch.concatenate(
                [model_kwargs["attention_mask"], torch.zeros_like(model_kwargs["attention_mask"])], dim=0
            )

    model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=last_hidden_state)

    return model_kwargs
def _prepare_audio_encoder_kwargs_for_generation(
    self, input_values, model_kwargs, model_input_name: Optional[str] = None
):
    """Encode an audio prompt into codebook ids and stash them in `model_kwargs`.

    Mono models encode `input_values` directly; stereo models encode the left
    and right channels separately and interleave the resulting codebooks. Sets
    `model_kwargs["decoder_input_ids"]` (shape `(bsz * num_codebooks, seq_len)`)
    and `model_kwargs["audio_scales"]`.
    """
    # 1. get audio encoder
    encoder = self.get_encoder(modality="audio")
    # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device
    # as the inputs.
    if hasattr(encoder, "_hf_hook"):
        encoder._hf_hook.io_same_device = True

    # 2. Prepare encoder args and encoder kwargs from model kwargs.
    irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
    encoder_kwargs = {
        argument: value
        for argument, value in model_kwargs.items()
        if not any(argument.startswith(p) for p in irrelevant_prefix)
    }
    encoder_signature = set(inspect.signature(encoder.forward).parameters)
    encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
    if not encoder_accepts_wildcard:
        # Encoder takes no **kwargs: drop anything its forward doesn't accept.
        encoder_kwargs = {
            argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
        }

    # 3. make sure that encoder returns `ModelOutput`
    model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name
    encoder_kwargs["return_dict"] = True

    if self.decoder.config.audio_channels == 1:
        encoder_kwargs[model_input_name] = input_values
        audio_encoder_outputs = encoder.encode(**encoder_kwargs)
        audio_codes = audio_encoder_outputs.audio_codes
        audio_scales = audio_encoder_outputs.audio_scales

        frames, bsz, codebooks, seq_len = audio_codes.shape
    else:
        if input_values.shape[1] != 2:
            raise ValueError(
                f"Expected stereo audio (2-channels) but example has {input_values.shape[1]} channel."
            )

        # Encode the left (channel 0) and right (channel 1) channels independently.
        encoder_kwargs[model_input_name] = input_values[:, :1, :]
        audio_encoder_outputs_left = encoder.encode(**encoder_kwargs)
        audio_codes_left = audio_encoder_outputs_left.audio_codes
        audio_scales_left = audio_encoder_outputs_left.audio_scales

        encoder_kwargs[model_input_name] = input_values[:, 1:, :]
        audio_encoder_outputs_right = encoder.encode(**encoder_kwargs)
        audio_codes_right = audio_encoder_outputs_right.audio_codes
        audio_scales_right = audio_encoder_outputs_right.audio_scales

        frames, bsz, codebooks, seq_len = audio_codes_left.shape
        # copy alternating left/right channel codes into stereo codebook
        audio_codes = audio_codes_left.new_ones((frames, bsz, 2 * codebooks, seq_len))

        audio_codes[:, :, ::2, :] = audio_codes_left
        audio_codes[:, :, 1::2, :] = audio_codes_right

        if audio_scales_left != [None] or audio_scales_right != [None]:
            audio_scales = torch.stack([audio_scales_left, audio_scales_right], dim=1)
        else:
            audio_scales = [None] * bsz

    if frames != 1:
        raise ValueError(
            f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
            "disabled by setting `chunk_length=None` in the audio encoder."
        )

    # Flatten codebooks into the batch dimension, as expected by the decoder.
    decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)

    model_kwargs["decoder_input_ids"] = decoder_input_ids
    model_kwargs["audio_scales"] = audio_scales
    return model_kwargs
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
    """Build decoder inputs by shifting `labels` one position to the right."""
    decoder_cfg = self.config.decoder
    return shift_tokens_right(labels, decoder_cfg.pad_token_id, decoder_cfg.bos_token_id)
def resize_token_embeddings(self, *args, **kwargs):
    """Unsupported on the composite model; resize the wrapped sub-models instead.

    Raises:
        NotImplementedError: always.
    """
    message = (
        "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
        " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
        " model.decoder.resize_token_embeddings(...))"
    )
    raise NotImplementedError(message)
def freeze_audio_encoder(self):
    """
    Freeze the audio encoder weights.
    """
    # Turn off gradients on every parameter, then flag the sub-model as frozen.
    for parameter in self.audio_encoder.parameters():
        parameter.requires_grad = False
    self.audio_encoder._requires_grad = False
def freeze_text_encoder(self):
    """
    Freeze the text encoder weights.
    """
    # Turn off gradients on every parameter, then flag the sub-model as frozen.
    for parameter in self.text_encoder.parameters():
        parameter.requires_grad = False
    self.text_encoder._requires_grad = False
def _maybe_initialize_input_ids_for_generation(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[dict[str, torch.Tensor]] = None,
) -> torch.LongTensor:
"""Initializes input ids for generation, if necessary."""
if inputs is not None:
return inputs
encoder_outputs = model_kwargs.get("encoder_outputs")
if encoder_outputs is not None:
# make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
shape = encoder_outputs[0].size()[:-1]
return torch.ones(shape, dtype=torch.long, device=self.device) * -100
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
# If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
# soft-prompting or in multimodal implementations built on top of decoder-only language models.
batch_size = 1
for value in model_kwargs.values():
if isinstance(value, torch.Tensor):
batch_size = value.shape[0]
break
return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id
def _get_decoder_start_token_id(
self, decoder_start_token_id: Optional[Union[int, list[int]]] = None, bos_token_id: Optional[int] = None
) -> int:
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.generation_config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@torch.no_grad()
def generate(
    self,
    inputs: Optional[torch.Tensor] = None,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    synced_gpus: Optional[bool] = None,
    streamer: Optional["BaseStreamer"] = None,
    use_model_defaults: Optional[bool] = None,
    **kwargs,
):
    """

    Generates sequences of token ids for models with a language modeling head.

    <Tip warning={true}>

    Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
    model's default generation configuration. You can override any `generation_config` by passing the corresponding
    parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.

    For an overview of generation strategies and code examples, check out the [following
    guide](./generation_strategies).

    </Tip>

    Parameters:
        inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
            The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
            method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
            should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
            `input_ids`, `input_values`, `input_features`, or `pixel_values`.
        generation_config (`~generation.GenerationConfig`, *optional*):
            The generation configuration to be used as base parametrization for the generation call. `**kwargs`
            passed to generate matching the attributes of `generation_config` will override them. If
            `generation_config` is not provided, the default will be used, which had the following loading
            priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
            configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
            default values, whose documentation should be checked to parameterize generation.
        logits_processor (`LogitsProcessorList`, *optional*):
            Custom logits processors that complement the default logits processors built from arguments and
            generation config. If a logit processor is passed that is already created with the arguments or a
            generation config an error is thrown. This feature is intended for advanced users.
        stopping_criteria (`StoppingCriteriaList`, *optional*):
            Custom stopping criteria that complement the default stopping criteria built from arguments and a
            generation config. If a stopping criteria is passed that is already created with the arguments or a
            generation config an error is thrown. This feature is intended for advanced users.
        synced_gpus (`bool`, *optional*, defaults to `False`):
            Whether to continue running the while loop until max_length (needed to avoid deadlocking with
            `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
        streamer (`BaseStreamer`, *optional*):
            Streamer object that will be used to stream the generated sequences. Generated tokens are passed
            through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
        use_model_defaults (`bool`, *optional*):
            When it is `True`, unset parameters in `generation_config` will be set to the model-specific default
            generation configuration (`model.generation_config`), as opposed to the global defaults
            (`GenerationConfig()`). If unset, models saved starting from `v4.50` will consider this flag to be
            `True`.
        kwargs (`dict[str, Any]`, *optional*):
            Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
            forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
            specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

    Return:
        [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
        or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.

            If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
            [`~utils.ModelOutput`] types are:

                - [`~generation.GenerateDecoderOnlyOutput`],
                - [`~generation.GenerateBeamDecoderOnlyOutput`]

            If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
            [`~utils.ModelOutput`] types are:

                - [`~generation.GenerateEncoderDecoderOutput`],
                - [`~generation.GenerateBeamEncoderDecoderOutput`]
    """
    # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
    generation_mode_kwargs = self._extract_generation_mode_kwargs(None, kwargs, False, None, None)
    generation_config, model_kwargs = self._prepare_generation_config(
        generation_config, use_model_defaults, **kwargs
    )
    generation_mode = generation_config.get_generation_mode()
    # MusicGen only supports greedy / sampling decoding (no beam search).
    if generation_mode not in [GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH]:
        raise ValueError(
            "Got incompatible mode for generation, should be one of greedy or sampling. "
            "Ensure that beam search is de-activated by setting `num_beams=1`."
        )
    self._validate_model_kwargs(model_kwargs.copy())
    self._validate_generation_mode(generation_mode, generation_config, generation_mode_kwargs)

    if model_kwargs.get("encoder_outputs") is not None and type(model_kwargs["encoder_outputs"]) is tuple:
        # wrap the unconditional outputs as a BaseModelOutput for compatibility with the rest of generate
        model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=model_kwargs["encoder_outputs"][0])

    # 2. Set generation parameters if not already defined
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

    requires_attention_mask = "encoder_outputs" not in model_kwargs
    kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None

    # 3. Define model inputs
    inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
        inputs, generation_config.bos_token_id, model_kwargs
    )
    batch_size = inputs_tensor.shape[0]
    self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device)

    # 4. Define other model kwargs
    model_kwargs["use_cache"] = generation_config.use_cache
    model_kwargs["guidance_scale"] = generation_config.guidance_scale

    if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
        model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
            inputs_tensor, generation_config, model_kwargs
        )

    if "encoder_outputs" not in model_kwargs:
        # encoder_outputs are created and added to `model_kwargs`
        model_kwargs = self._prepare_text_encoder_kwargs_for_generation(
            inputs_tensor, model_kwargs, model_input_name, generation_config
        )

    if "decoder_input_ids" not in model_kwargs and "input_values" in model_kwargs:
        # An audio prompt was given: encode it into codebook ids to seed the decoder.
        model_kwargs = self._prepare_audio_encoder_kwargs_for_generation(
            model_kwargs["input_values"],
            model_kwargs,
        )

    # 5. Prepare `input_ids` which will be used for auto-regressive generation
    input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
        batch_size=batch_size,
        model_input_name=model_input_name,
        model_kwargs=model_kwargs,
        decoder_start_token_id=generation_config._decoder_start_token_tensor,
        bos_token_id=generation_config._bos_token_tensor,
        device=inputs_tensor.device,
    )

    # 6. Prepare `max_length` depending on other stopping criteria.
    input_ids_length = input_ids.shape[-1]
    has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
    has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
    generation_config = self._prepare_generated_length(
        generation_config=generation_config,
        has_default_max_length=has_default_max_length,
        has_default_min_length=has_default_min_length,
        model_input_name=model_input_name,
        inputs_tensor=inputs_tensor,
        input_ids_length=input_ids_length,
    )

    # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen)
    input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
        input_ids,
        pad_token_id=generation_config._decoder_start_token_tensor,
        max_length=generation_config.max_length,
    )
    # stash the delay mask so that we don't have to recompute in each forward pass
    model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask

    # input_ids are ready to be placed on the streamer (if used)
    if streamer is not None:
        streamer.put(input_ids.cpu())

    # 7. determine generation mode
    generation_mode = generation_config.get_generation_mode()

    # 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG)
    if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
        logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
        # reset so the scale isn't applied a second time downstream
        generation_config.guidance_scale = None

    # 9. prepare distribution pre_processing samplers
    logits_processor = self._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_length,
        encoder_input_ids=inputs_tensor,
        prefix_allowed_tokens_fn=None,
        logits_processor=logits_processor,
        device=input_ids.device,
    )

    # 10. prepare stopping criteria
    stopping_criteria = self._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )

    # expand input_ids with `num_return_sequences` additional sequences per batch
    input_ids, model_kwargs = self._expand_inputs_for_generation(
        input_ids=input_ids,
        expand_size=generation_config.num_return_sequences,
        is_encoder_decoder=self.config.is_encoder_decoder,
        **model_kwargs,
    )

    # 10b. prepare prefill outputs
    generation_mode_kwargs["prefill_outputs"] = self._prefill(input_ids, generation_config, model_kwargs)

    # 11. run sample
    outputs = self._sample(
        input_ids,
        logits_processor=logits_processor,
        stopping_criteria=stopping_criteria,
        generation_config=generation_config,
        **generation_mode_kwargs,
        **model_kwargs,
    )

    if generation_config.return_dict_in_generate:
        output_ids = outputs.sequences
    else:
        output_ids = outputs

    # apply the pattern mask to the final ids
    output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"])

    # revert the pattern delay mask by filtering the pad token id
    output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
        batch_size, self.decoder.num_codebooks, -1
    )

    # append the frame dimension back to the audio codes
    output_ids = output_ids[None, ...]

    audio_scales = model_kwargs.get("audio_scales")
    if audio_scales is None:
        audio_scales = [None] * batch_size

    if self.decoder.config.audio_channels == 1:
        output_values = self.audio_encoder.decode(
            output_ids,
            audio_scales=audio_scales,
        ).audio_values
    else:
        # Stereo: even codebooks hold the left channel, odd the right; decode each
        # half separately and concatenate along the channel dimension.
        codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales)
        output_values_left = codec_outputs_left.audio_values

        codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales)
        output_values_right = codec_outputs_right.audio_values

        output_values = torch.cat([output_values_left, output_values_right], dim=1)

    if generation_config.return_dict_in_generate:
        # NOTE(review): `sequences` is repurposed here to carry decoded audio values
        # rather than token ids, as the ids have already been consumed by the codec.
        outputs.sequences = output_values
        return outputs
    else:
        return output_values
def get_unconditional_inputs(self, num_samples=1):
    """
    Helper function to get null inputs for unconditional generation, enabling the model to be used without the
    feature extractor or tokenizer.

    Args:
        num_samples (int, *optional*):
            Number of audio samples to unconditionally generate.

    Example:
    ```python
    >>> from transformers import MusicgenForConditionalGeneration

    >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

    >>> # get the unconditional (or 'null') inputs for the model
    >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
    >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
    ```"""
    # An all-zero hidden state of length 1, combined with an all-zero attention
    # mask, acts as the model's "null" text prompt.
    hidden_size = self.config.text_encoder.hidden_size
    null_hidden_state = torch.zeros((num_samples, 1, hidden_size), device=self.device, dtype=self.dtype)
    null_attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long)

    return MusicgenUnconditionalInput(
        encoder_outputs=(null_hidden_state,),
        attention_mask=null_attention_mask,
        guidance_scale=1.0,
    )
__all__ = ["MusicgenForConditionalGeneration", "MusicgenForCausalLM", "MusicgenModel", "MusicgenPreTrainedModel"]
| MusicgenForConditionalGeneration |
python | huggingface__transformers | src/transformers/models/pixtral/processing_pixtral.py | {
"start": 1773,
"end": 11607
} | class ____(ProcessorMixin):
r"""
Constructs a Pixtral processor which wraps a Pixtral image processor and a Pixtral tokenizer into a single processor.
[`PixtralProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~PixtralProcessor.__call__`] and [`~PixtralProcessor.decode`] for more information.
Args:
image_processor ([`PixtralImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*, defaults to 16):
Patch size from the vision tower.
spatial_merge_size (`int`, *optional*, defaults to 1):
The downsampling factor for the spatial merge operation.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"[IMG]"`):
Special token used to denote image location.
image_break_token (`str`, *optional*, defaults to `"[IMG_BREAK]"`):
Special token used to denote the end of a line of pixels in an image.
image_end_token (`str`, *optional*, defaults to `"[IMG_END]"`):
Special token used to denote the end of an image input.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size: int = 16,
spatial_merge_size: int = 1,
chat_template=None,
image_token="[IMG]", # set the default and let users change if they have peculiar special tokens in rare cases
image_break_token="[IMG_BREAK]",
image_end_token="[IMG_END]",
**kwargs,
):
self.patch_size = patch_size
self.spatial_merge_size = spatial_merge_size
self.image_token = image_token
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.image_break_token = image_break_token
self.image_end_token = image_end_token
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.image_break_token_id = tokenizer.convert_tokens_to_ids(self.image_break_token)
self.image_end_token_id = tokenizer.convert_tokens_to_ids(self.image_end_token)
self.image_ids = [self.image_token_id, self.image_break_token_id, self.image_end_token_id]
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[PixtralProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
PixtralProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
patch_size = self.patch_size * self.spatial_merge_size
if images is not None:
output_kwargs["images_kwargs"]["patch_size"] = patch_size
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
else:
image_inputs = {}
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
# try to expand inputs in processing if we have the necessary parts
prompt_strings = text
if image_inputs.get("pixel_values") is not None:
# Replace the image token with the expanded image token sequence
image_sizes = iter(image_inputs["image_sizes"])
prompt_strings = []
replace_strings = []
for sample in text:
while self.image_token in sample:
height, width = next(image_sizes)
num_height_tokens = height // patch_size
num_width_tokens = width // patch_size
replace_tokens = [
[self.image_token] * num_width_tokens + [self.image_break_token]
] * num_height_tokens
# Flatten list
replace_tokens = [item for sublist in replace_tokens for item in sublist]
replace_tokens[-1] = self.image_end_token
replace_str = "".join(replace_tokens)
replace_strings.append(replace_str)
sample = sample.replace(self.image_token, "<placeholder>", 1)
while "<placeholder>" in sample:
replace_str = replace_strings.pop(0)
sample = sample.replace("<placeholder>", replace_str, 1)
prompt_strings.append(sample)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = PixtralProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
patch_size = self.patch_size * self.spatial_merge_size
num_image_tokens = []
for height, width in image_sizes:
resized_height, resized_width = get_resize_output_image_size(
np.zeros((height, width, 3)),
size=(size["longest_edge"], size["longest_edge"]),
patch_size=(patch_size, patch_size),
)
num_height_tokens = resized_height // patch_size
num_width_tokens = resized_width // patch_size
num_image_tokens.append((num_width_tokens + 1) * num_height_tokens)
num_image_patches = [1] * len(image_sizes)
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return tokenizer_input_names + image_processor_input_names + ["image_sizes"]
__all__ = ["PixtralProcessor"]
| PixtralProcessor |
python | numba__numba | numba/cuda/tests/cudadrv/test_linker.py | {
"start": 2225,
"end": 10161
} | class ____(CUDATestCase):
_NUMBA_NVIDIA_BINDING_0_ENV = {'NUMBA_CUDA_USE_NVIDIA_BINDING': '0'}
@require_context
def test_linker_basic(self):
'''Simply go through the constructor and destructor
'''
linker = Linker.new(cc=(5, 3))
del linker
def _test_linking(self, eager):
global bar # must be a global; other it is recognized as a freevar
bar = cuda.declare_device('bar', 'int32(int32)')
link = str(test_data_dir / 'jitlink.ptx')
if eager:
args = ['void(int32[:], int32[:])']
else:
args = []
@cuda.jit(*args, link=[link])
def foo(x, y):
i = cuda.grid(1)
x[i] += bar(y[i])
A = np.array([123], dtype=np.int32)
B = np.array([321], dtype=np.int32)
foo[1, 1](A, B)
self.assertTrue(A[0] == 123 + 2 * 321)
def test_linking_lazy_compile(self):
self._test_linking(eager=False)
def test_linking_eager_compile(self):
self._test_linking(eager=True)
def test_linking_cu(self):
bar = cuda.declare_device('bar', 'int32(int32)')
link = str(test_data_dir / 'jitlink.cu')
@cuda.jit(link=[link])
def kernel(r, x):
i = cuda.grid(1)
if i < len(r):
r[i] = bar(x[i])
x = np.arange(10, dtype=np.int32)
r = np.zeros_like(x)
kernel[1, 32](r, x)
# Matches the operation of bar() in jitlink.cu
expected = x * 2
np.testing.assert_array_equal(r, expected)
def test_linking_cu_log_warning(self):
bar = cuda.declare_device('bar', 'int32(int32)')
link = str(test_data_dir / 'warn.cu')
with warnings.catch_warnings(record=True) as w:
ignore_internal_warnings()
@cuda.jit('void(int32)', link=[link])
def kernel(x):
bar(x)
self.assertEqual(len(w), 1, 'Expected warnings from NVRTC')
# Check the warning refers to the log messages
self.assertIn('NVRTC log messages', str(w[0].message))
# Check the message pertaining to the unused variable is provided
self.assertIn('declared but never referenced', str(w[0].message))
def test_linking_cu_error(self):
bar = cuda.declare_device('bar', 'int32(int32)')
link = str(test_data_dir / 'error.cu')
with self.assertRaises(NvrtcError) as e:
@cuda.jit('void(int32)', link=[link])
def kernel(x):
bar(x)
msg = e.exception.args[0]
# Check the error message refers to the NVRTC compile
self.assertIn('NVRTC Compilation failure', msg)
# Check the expected error in the CUDA source is reported
self.assertIn('identifier "SYNTAX" is undefined', msg)
# Check the filename is reported correctly
self.assertIn('in the compilation of "error.cu"', msg)
def test_linking_unknown_filetype_error(self):
expected_err = "Don't know how to link file with extension .cuh"
with self.assertRaisesRegex(RuntimeError, expected_err):
@cuda.jit('void()', link=['header.cuh'])
def kernel():
pass
def test_linking_file_with_no_extension_error(self):
expected_err = "Don't know how to link file with no extension"
with self.assertRaisesRegex(RuntimeError, expected_err):
@cuda.jit('void()', link=['data'])
def kernel():
pass
@skip_if_cuda_includes_missing
def test_linking_cu_cuda_include(self):
link = str(test_data_dir / 'cuda_include.cu')
# An exception will be raised when linking this kernel due to the
# compile failure if CUDA includes cannot be found by Nvrtc.
@cuda.jit('void()', link=[link])
def kernel():
pass
def test_try_to_link_nonexistent(self):
with self.assertRaises(LinkerError) as e:
@cuda.jit('void(int32[::1])', link=['nonexistent.a'])
def f(x):
x[0] = 0
self.assertIn('nonexistent.a not found', e.exception.args)
def test_set_registers_no_max(self):
"""Ensure that the jitted kernel used in the test_set_registers_* tests
uses more than 57 registers - this ensures that test_set_registers_*
are really checking that they reduced the number of registers used from
something greater than the maximum."""
compiled = cuda.jit(func_with_lots_of_registers)
compiled = compiled.specialize(np.empty(32), *range(6))
self.assertGreater(compiled.get_regs_per_thread(), 57)
def test_set_registers_57(self):
compiled = cuda.jit(max_registers=57)(func_with_lots_of_registers)
compiled = compiled.specialize(np.empty(32), *range(6))
self.assertLessEqual(compiled.get_regs_per_thread(), 57)
def test_set_registers_38(self):
compiled = cuda.jit(max_registers=38)(func_with_lots_of_registers)
compiled = compiled.specialize(np.empty(32), *range(6))
self.assertLessEqual(compiled.get_regs_per_thread(), 38)
def test_set_registers_eager(self):
sig = void(float64[::1], int64, int64, int64, int64, int64, int64)
compiled = cuda.jit(sig, max_registers=38)(func_with_lots_of_registers)
self.assertLessEqual(compiled.get_regs_per_thread(), 38)
def test_get_const_mem_size(self):
sig = void(float64[::1])
compiled = cuda.jit(sig)(simple_const_mem)
const_mem_size = compiled.get_const_mem_size()
self.assertGreaterEqual(const_mem_size, CONST1D.nbytes)
def test_get_no_shared_memory(self):
compiled = cuda.jit(func_with_lots_of_registers)
compiled = compiled.specialize(np.empty(32), *range(6))
shared_mem_size = compiled.get_shared_mem_per_block()
self.assertEqual(shared_mem_size, 0)
def test_get_shared_mem_per_block(self):
sig = void(int32[::1], typeof(np.int32))
compiled = cuda.jit(sig)(simple_smem)
shared_mem_size = compiled.get_shared_mem_per_block()
self.assertEqual(shared_mem_size, 400)
def test_get_shared_mem_per_specialized(self):
compiled = cuda.jit(simple_smem)
compiled_specialized = compiled.specialize(
np.zeros(100, dtype=np.int32), np.float64)
shared_mem_size = compiled_specialized.get_shared_mem_per_block()
self.assertEqual(shared_mem_size, 800)
def test_get_max_threads_per_block(self):
compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
max_threads = compiled.get_max_threads_per_block()
self.assertGreater(max_threads, 0)
def test_max_threads_exceeded(self):
compiled = cuda.jit("void(int32[::1])")(simple_maxthreads)
max_threads = compiled.get_max_threads_per_block()
nelem = max_threads + 1
ary = np.empty(nelem, dtype=np.int32)
try:
compiled[1, nelem](ary)
except CudaAPIError as e:
self.assertIn("cuLaunchKernel", e.msg)
def test_get_local_mem_per_thread(self):
sig = void(int32[::1], int32[::1], typeof(np.int32))
compiled = cuda.jit(sig)(simple_lmem)
local_mem_size = compiled.get_local_mem_per_thread()
calc_size = np.dtype(np.int32).itemsize * LMEM_SIZE
self.assertGreaterEqual(local_mem_size, calc_size)
def test_get_local_mem_per_specialized(self):
compiled = cuda.jit(simple_lmem)
compiled_specialized = compiled.specialize(
np.zeros(LMEM_SIZE, dtype=np.int32),
np.zeros(LMEM_SIZE, dtype=np.int32),
np.float64)
local_mem_size = compiled_specialized.get_local_mem_per_thread()
calc_size = np.dtype(np.float64).itemsize * LMEM_SIZE
self.assertGreaterEqual(local_mem_size, calc_size)
if __name__ == '__main__':
unittest.main()
| TestLinker |
python | skorch-dev__skorch | skorch/tests/test_scoring.py | {
"start": 76,
"end": 3849
} | class ____:
@pytest.fixture(scope="module")
def data(self, classifier_data):
return classifier_data
@pytest.fixture(scope="module", params=["mean", "sum"])
def reduction(self, request):
return request.param
@pytest.fixture(scope="module")
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture(scope="module")
def module_cls(self, classifier_module):
return classifier_module
@pytest.fixture(scope="module")
def net(self, net_cls, module_cls, reduction):
return net_cls(module_cls, lr=0.1, criterion__reduction=reduction)
@pytest.fixture(scope="module")
def net_fit(self, net, data):
X, y = data
return net.fit(X, y)
@pytest.fixture(scope="module")
def loss_scoring_fn(self):
from skorch.scoring import loss_scoring
return loss_scoring
@pytest.fixture(scope="module")
def scored_net_cls(self, net_cls, loss_scoring_fn):
class ScoredNet(net_cls):
def score(self, X, y=None):
return loss_scoring_fn(self, X, y)
return ScoredNet
@pytest.fixture(scope="module")
def scored_net(self, scored_net_cls, module_cls, reduction):
return scored_net_cls(
module_cls, lr=0.01, criterion__reduction=reduction
)
@pytest.fixture(scope="module")
def scored_net_fit(self, scored_net, data):
X, y = data
return scored_net.fit(X, y)
def test_score_unfit_net_raises(self, loss_scoring_fn, net, data):
from skorch.exceptions import NotInitializedError
X, y = data
with pytest.raises(NotInitializedError):
loss_scoring_fn(net, X, y)
def test_score_unfit_scored_net_raises(self, scored_net, data):
from skorch.exceptions import NotInitializedError
X, y = data
with pytest.raises(NotInitializedError):
scored_net.score(X, y)
def test_nonnull_sample_weight_raises(self, loss_scoring_fn, net_fit, data):
X, y = data
with pytest.raises(NotImplementedError):
loss_scoring_fn(
net_fit, X, y, sample_weight=np.random.rand(X.shape[0])
)
def test_scored_net_output_type(self, scored_net_fit, data):
X, y = data
score_value = scored_net_fit.score(X, y)
assert np.isscalar(score_value)
def test_score_on_net_fit(self, loss_scoring_fn, net_fit, data):
X, y = data
score_value = loss_scoring_fn(net_fit, X, y)
assert np.isscalar(score_value)
def test_scored_net_matches_criterion_value(self, scored_net_fit, data):
X, y = data
y_val_proba = torch.as_tensor(scored_net_fit.predict_proba(X))
loss_value = scored_net_fit.get_loss(y_val_proba, y)
score_value = scored_net_fit.score(X, y)
assert np.allclose(score_value, loss_value.item())
def test_scored_net_with_reduction_none(
self, scored_net_cls, module_cls, reduction, data
):
X, y = data
net = scored_net_cls(
module_cls, lr=0.01, criterion__reduction=reduction
).fit(X, y)
net.set_params(criterion__reduction="sum")
loss_value = net.score(X, y)
net.set_params(criterion__reduction="none")
output = net.score(X, y)
assert output.shape[0] == X.shape[0]
assert np.allclose(output.sum(), loss_value)
def test_score_unknown_reduction_raises(
self, loss_scoring_fn, net_fit, data
):
X, y = data
net_fit.set_params(criterion__reduction="unk")
with pytest.raises(ValueError, match="for reduction but got"):
loss_scoring_fn(net_fit, X, y)
| TestLossScoring |
python | pypa__pipenv | pipenv/vendor/tomlkit/toml_document.py | {
"start": 56,
"end": 124
} | class ____(Container):
"""
A TOML document.
"""
| TOMLDocument |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 9981,
"end": 10070
} | class ____(BitwiseShiftOperation):
pass
@infer_global(operator.ilshift)
| BitwiseLeftShift |
python | spyder-ide__spyder | spyder/plugins/updatemanager/workers.py | {
"start": 13935,
"end": 19189
} | class ____(BaseWorker):
"""
Worker that checks and updates Spyder-updater without blocking
the Spyder user interface.
"""
def __init__(self, stable_only):
super().__init__()
self.stable_only = stable_only
self.asset_info = None
self.installer_path = None
self.error = None
__, updater_version = get_updater_info()
self.updater_version = parse(updater_version)
def _check_asset_available(self):
"""Checks if there is an update available for the Updater."""
# Get release info from Github
releases = get_github_releases(updater=True)
if self.stable_only:
# Only use stable releases
releases = {
k: v for k, v in releases.items() if not k.is_prerelease
}
logger.debug(f"Available releases: {sorted(releases)}")
self.asset_info = _check_asset_available(
releases, self.updater_version, updater=True
)
def _clean_installer_dir(self):
"""Remove downloaded file"""
installer_dir = osp.dirname(self.installer_path)
if osp.exists(installer_dir):
try:
shutil.rmtree(installer_dir)
except OSError as err:
logger.debug(err, stack_info=True)
def _download_asset(self):
"""Download Updater lock file"""
self.installer_path = osp.join(
get_temp_dir(),
"updates",
"spyder-updater",
str(self.asset_info["version"]),
self.asset_info["filename"],
)
if (
osp.exists(self.installer_path)
and validate_download(
self.installer_path, self.asset_info["checksum"]
)
):
logger.debug(f"{self.installer_path} already downloaded.")
return
self._clean_installer_dir()
dirname = osp.dirname(self.installer_path)
os.makedirs(dirname, exist_ok=True)
url = self.asset_info["url"]
logger.info(f"Downloading {url} to {self.installer_path}")
page = requests.get(url, headers=GH_HEADERS)
page.raise_for_status()
with open(self.installer_path, 'wb') as f:
f.write(page.content)
if validate_download(self.installer_path, self.asset_info["checksum"]):
logger.info('Download successfully completed.')
else:
raise UpdateDownloadError("Download failed!")
def _install_update(self):
"""Install or update Spyder-updater environment."""
dirname = osp.dirname(self.installer_path)
if os.name == "nt":
plat = "win-64"
elif sys.platform == "darwin":
plat = "osx-arm64" if platform.machine() == "arm64" else "osx-64"
else:
plat = "linux-64"
spy_updater_lock = osp.join(dirname, f"conda-updater-{plat}.lock")
spy_updater_conda = glob(osp.join(dirname, "spyder-updater*.conda"))[0]
conda_exe = find_conda()
conda_cmd = "create"
if self.updater_version > parse("0.0.0"):
conda_cmd = "update"
env_path = osp.join(osp.dirname(sys.prefix), "spyder-updater")
cmd = [
# Update spyder-updater environment
conda_exe,
conda_cmd, "--yes",
"--prefix", env_path,
"--file", spy_updater_lock,
"&&",
# Update spyder-updater
conda_exe,
"install", "--yes",
"--prefix", env_path,
"--no-deps",
"--force-reinstall",
spy_updater_conda
]
logger.debug(f"""Conda command for the updater: '{" ".join(cmd)}'""")
proc = subprocess.run(
" ".join(cmd), shell=True, capture_output=True, text=True
)
proc.check_returncode()
def start(self):
"""Main method of the worker."""
try:
self._check_asset_available()
if (
self.asset_info is None
and self.updater_version == parse("0.0.0")
):
raise RuntimeError(
"Spyder-updater is not installed and "
"not available for download!"
)
elif self.asset_info is not None:
self._download_asset()
self._install_update()
except Exception as err:
# Send untracked errors to our error reporter
self.error = str(err)
error_data = dict(
text=traceback.format_exc(),
is_traceback=True,
title=_("Error when updating Spyder-updater"),
)
self.sig_exception_occurred.emit(error_data)
logger.error(err, exc_info=err)
finally:
# At this point we **must** emit the signal below so that the
# "Check for updates" action in the Help menu is enabled again
# after the check has finished (it's disabled while the check is
# running).
try:
self.sig_ready.emit(self.error is None)
except RuntimeError:
pass
| WorkerUpdateUpdater |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_documenters.py | {
"start": 43770,
"end": 46852
} | class ____:
"""Mixin for FunctionDocumenter and MethodDocumenter to provide the
feature of reading the signature from the docstring.
"""
_new_docstrings: list[list[str]] | None = None
_signatures: list[str] = []
def _find_signature(self) -> tuple[str | None, str | None] | None:
# candidates of the object name
valid_names = [self.objpath[-1]] # type: ignore[attr-defined]
if isinstance(self, ClassDocumenter):
valid_names.append('__init__')
if hasattr(self.object, '__mro__'):
valid_names.extend(cls.__name__ for cls in self.object.__mro__)
docstrings = self.get_doc()
if docstrings is None:
return None, None
self._new_docstrings = docstrings[:]
self._signatures = []
result = None
for i, doclines in enumerate(docstrings):
for j, line in enumerate(doclines):
if not line:
# no lines in docstring, no match
break
if line.endswith('\\'):
line = line.rstrip('\\').rstrip()
# match first line of docstring against signature RE
match = py_ext_sig_re.match(line)
if not match:
break
_exmod, _path, base, _tp_list, args, retann = match.groups()
# the base name must match ours
if base not in valid_names:
break
# re-prepare docstring to ignore more leading indentation
directive = self.directive # type: ignore[attr-defined]
tab_width = directive.state.document.settings.tab_width
self._new_docstrings[i] = prepare_docstring(
'\n'.join(doclines[j + 1 :]), tab_width
)
if result is None:
# first signature
result = args, retann
else:
# subsequent signatures
self._signatures.append(f'({args}) -> {retann}')
if result is not None:
# finish the loop when signature found
break
return result
def get_doc(self) -> list[list[str]] | None:
if self._new_docstrings is not None:
return self._new_docstrings
return super().get_doc() # type: ignore[misc]
def format_signature(self, **kwargs: Any) -> str:
self.args: str | None
if self.args is None and self.config.autodoc_docstring_signature: # type: ignore[attr-defined]
# only act if a signature is not explicitly given already, and if
# the feature is enabled
result = self._find_signature()
if result is not None:
self.args, self.retann = result
sig = super().format_signature(**kwargs) # type: ignore[misc]
if self._signatures:
return '\n'.join((sig, *self._signatures))
else:
return sig
| DocstringSignatureMixin |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/utils/backward_compatibility.py | {
"start": 700,
"end": 1034
} | class ____(Exception):
def __init__(self, error_message: str, context: BackwardIncompatibilityContext) -> None:
self.error_message = error_message
self.context = context
super().__init__(error_message)
def __str__(self):
return f"{self.context} - {self.error_message}"
| NonBackwardCompatibleError |
python | getsentry__sentry | tests/sentry/notifications/notification_action/metric_alert_registry/test_msteams_metric_alert_handler.py | {
"start": 1131,
"end": 8376
} | class ____(MetricAlertHandlerBase):
def setUp(self) -> None:
self.create_models()
self.action = self.create_action(
type=Action.Type.MSTEAMS,
integration_id=1234567890,
config={
"target_identifier": "channel123",
"target_display": "Channel 123",
"target_type": ActionTarget.SPECIFIC,
},
)
self.handler = MSTeamsMetricAlertHandler()
@mock.patch("sentry.integrations.msteams.utils.send_incident_alert_notification")
@freeze_time("2021-01-01 00:00:00")
def test_send_alert(self, mock_send_incident_alert_notification: mock.MagicMock) -> None:
notification_context = NotificationContext.from_action_model(self.action)
assert self.group_event.occurrence is not None
assert self.group_event.occurrence.priority is not None
alert_context = AlertContext.from_workflow_engine_models(
self.detector,
self.evidence_data,
self.group_event.group.status,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
metric_issue_context = MetricIssueContext.from_group_event(
self.group,
self.evidence_data,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
open_period_context = OpenPeriodContext.from_group(self.group)
notification_uuid = str(uuid.uuid4())
self.handler.send_alert(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
trigger_status=TriggerStatus.ACTIVE,
project=self.detector.project,
organization=self.detector.project.organization,
notification_uuid=notification_uuid,
)
mock_send_incident_alert_notification.assert_called_once_with(
organization=self.detector.project.organization,
alert_context=alert_context,
notification_context=notification_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
alert_rule_serialized_response=get_alert_rule_serializer(self.detector),
incident_serialized_response=get_detailed_incident_serializer(self.open_period),
notification_uuid=notification_uuid,
)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.MSTeamsMetricAlertHandler.send_alert"
)
@freeze_time("2021-01-01 00:00:00")
def test_invoke_legacy_registry(self, mock_send_alert: mock.MagicMock) -> None:
self.handler.invoke_legacy_registry(self.event_data, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
self.assert_notification_context(
notification_context,
integration_id=1234567890,
target_identifier="channel123",
target_display="Channel 123",
sentry_app_config=None,
sentry_app_id=None,
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.ABOVE,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[0]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CRITICAL,
metric_value=123.45,
group=self.group_event.group,
title=self.group_event.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group_event.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.MSTeamsMetricAlertHandler.send_alert"
)
@freeze_time("2021-01-01 00:00:00")
def test_invoke_legacy_registry_with_activity(self, mock_send_alert: mock.MagicMock) -> None:
# Create an Activity instance with evidence data and priority
activity_data = asdict(self.evidence_data)
activity = Activity(
project=self.project,
group=self.group,
type=ActivityType.SET_RESOLVED.value,
data=activity_data,
)
activity.save()
# Create event data with Activity instead of GroupEvent
event_data_with_activity = WorkflowEventData(
event=activity,
workflow_env=self.workflow.environment,
group=self.group,
)
self.handler.invoke_legacy_registry(event_data_with_activity, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
# Verify that the same data is extracted from Activity.data as from GroupEvent.occurrence.evidence_data
self.assert_notification_context(
notification_context,
integration_id=1234567890,
target_identifier="channel123",
target_display="Channel 123",
sentry_app_config=None,
sentry_app_id=None,
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.BELOW,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[2]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CLOSED,
metric_value=123.45,
group=self.group,
title=self.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
| TestMsteamsMetricAlertHandler |
python | plotly__plotly.py | plotly/graph_objs/choroplethmapbox/_colorbar.py | {
"start": 233,
"end": 61680
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmapbox"
_path_str = "choroplethmapbox.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.choroplethmapb
ox.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
choroplethmapbox.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.choroplethmapbox.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
    @property
    def _prop_descriptions(self):
        """Plain-text description of every valid property; consumed by
        the base class when assembling the constructor docstring. The
        returned string must stay in sync with `_valid_props`."""
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.choroplethmapbo
            x.colorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.chorop
            lethmapbox.colorbar.tickformatstopdefaults), sets the
            default property values to use for elements of
            choroplethmapbox.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.choroplethmapbox.colorbar.
            Title` instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmapbox.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.choroplethmapbo
x.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.chorop
lethmapbox.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
choroplethmapbox.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.choroplethmapbox.colorbar.
Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmapbox.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | prabhupant__python-ds | data_structures/binary_trees/check_cousin.py | {
"start": 120,
"end": 1182
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def level(root, node, lev):
if not root:
return 0
if root == node:
return lev
l = level(root.left, node, lev+1)
if not l == 0:
return l
l = level(root.right, node, lev+1)
def is_sibling(root, a, b):
if not root:
return False
return (root.left == a and root.right == b) or \
(root.right == b and root.left == a) or \
is_sibling(root.left, a, b) or \
is_sibling(root.right, a, b)
def is_cousin(root, a, b):
if level(root, a, 1) == level(root, b, 1) and not is_sibling(root, a, b):
return True
else:
return False
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.left.right.right = Node(15)
root.right.left = Node(6)
root.right.right = Node(7)
root.right.left.right = Node(8)
node1 = root.left.right
node2 = root.right.right
print(is_cousin(root, node1, node2)) | Node |
python | davidhalter__jedi | test/completion/goto.py | {
"start": 2598,
"end": 3297
} | class ____():
""" abc """
pass
# -----------------
# params
# -----------------
param = ClassDef
#! 8 ['param param']
def ab1(param): pass
#! 9 ['param param']
def ab2(param): pass
#! 11 ['param = ClassDef']
def ab3(a=param): pass
ab1(ClassDef);ab2(ClassDef);ab3(ClassDef)
# -----------------
# for loops
# -----------------
for i in range(1):
#! ['for i in range(1): i']
i
for key, value in [(1,2)]:
#! ['for key, value in [(1,2)]: key']
key
#! 4 ['for y in [1]: y']
for y in [1]:
#! ['for y in [1]: y']
y
# -----------------
# decorator
# -----------------
def dec(dec_param=3):
pass
#! 8 ['param dec_param=3']
@dec(dec_param=5)
def y():
pass
| ClassDef |
python | google__jax | tests/debug_nans_test.py | {
"start": 7091,
"end": 8919
} | class ____(jtu.JaxTestCase):
def testSingleResultPrimitiveNoInf(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans = jnp.tanh(A)
ans.block_until_ready()
def testMultipleResultPrimitiveNoInf(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans, _ = jnp.linalg.eigh(A)
ans.block_until_ready()
def testJitComputationNoInf(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans = jax.jit(jnp.tanh)(A)
ans.block_until_ready()
def testSingleResultPrimitiveInf(self):
A = jnp.array(0.)
with self.assertRaises(FloatingPointError):
ans = 1. / A
ans.block_until_ready()
@jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
def testCallDeoptimized(self, jit):
@jit
def f(x):
return jax.lax.cond(
x == 1, lambda _: np.inf, lambda _: 2., operand=None)
# This makes sure, when using the C++ jit, that the Python code has been
# run to compile, and the next call won't go through `cache_miss`.
f(2)
# 'cond' not 'xla_call'
msg = r"invalid value \(inf\) encountered in .*cond.*"
with self.assertRaisesRegex(FloatingPointError, msg):
f(1)
def testDebugNansDoesntCorruptCaches(self):
# https://github.com/jax-ml/jax/issues/6614
@jax.jit
def f(x):
return jnp.divide(x, x)
for _ in range(2):
try:
with jax.debug_nans(True):
jax.grad(f)(0.)
except FloatingPointError:
pass
def testDebugNansDoesntReturnDeoptimizedResult(self):
@jax.jit
def f(x):
y = x + 2 # avoid trivial dispatch path by adding some eqn
return jnp.nan, y
with self.assertRaisesRegex(FloatingPointError, "the de-optimized function did not .*literal"):
with jax.debug_nans(True):
f(3)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| DebugInfsTest |
python | fastapi__sqlmodel | docs_src/tutorial/offset_and_limit/tutorial003_py310.py | {
"start": 71,
"end": 1593
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).offset(6).limit(3)
results = session.exec(statement)
heroes = results.all()
print(heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | un33k__django-uuslug | uuslug/tests/tests.py | {
"start": 10029,
"end": 10197
} | class ____(TestCase):
def test_uuslug_checks_for_model_instance(self):
self.assertRaises(Exception, uuslug, 'test_slug', CoolSlug)
| ModelInstanceExeptionTestCase |
python | keon__algorithms | tests/test_dfs.py | {
"start": 805,
"end": 1186
} | class ____(unittest.TestCase):
def test_num_islands(self):
self.assertEqual(1, num_islands([[1, 1, 1, 1, 0], [1, 1, 0, 1, 0],
[1, 1, 0, 0, 0], [0, 0, 0, 0, 0]]))
self.assertEqual(3, num_islands([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0],
[0, 0, 1, 0, 0], [0, 0, 0, 1, 1]]))
| TestCountIslands |
python | huggingface__transformers | src/transformers/models/detr/configuration_detr.py | {
"start": 921,
"end": 12438
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DetrModel`]. It is used to instantiate a DETR
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the DETR
[facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_config (`PreTrainedConfig` or `dict`, *optional*):
The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
case it will default to `ResNetConfig()`.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_queries (`int`, *optional*, defaults to 100):
Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetrModel`] can
detect in a single image. For COCO, we recommend 100 queries.
d_model (`int`, *optional*, defaults to 256):
This parameter is a general dimension parameter, defining dimensions for components such as the encoder layer and projection parameters in the decoder layer, among others.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
position_embedding_type (`str`, *optional*, defaults to `"sine"`):
Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
backbone (`str`, *optional*, defaults to `"resnet50"`):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, `True`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
dilation (`bool`, *optional*, defaults to `False`):
Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
`use_timm_backbone` = `True`.
class_cost (`float`, *optional*, defaults to 1):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
mask_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the Focal loss in the panoptic segmentation loss.
dice_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.1):
Relative classification weight of the 'no-object' class in the object detection loss.
Examples:
```python
>>> from transformers import DetrConfig, DetrModel
>>> # Initializing a DETR facebook/detr-resnet-50 style configuration
>>> configuration = DetrConfig()
>>> # Initializing a model (with random weights) from the facebook/detr-resnet-50 style configuration
>>> model = DetrModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "detr"
sub_configs = {"backbone_config": AutoConfig}
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
use_timm_backbone=True,
backbone_config=None,
num_channels=3,
num_queries=100,
encoder_layers=6,
encoder_ffn_dim=2048,
encoder_attention_heads=8,
decoder_layers=6,
decoder_ffn_dim=2048,
decoder_attention_heads=8,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
is_encoder_decoder=True,
activation_function="relu",
d_model=256,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
init_xavier_std=1.0,
auxiliary_loss=False,
position_embedding_type="sine",
backbone="resnet50",
use_pretrained_backbone=True,
backbone_kwargs=None,
dilation=False,
class_cost=1,
bbox_cost=5,
giou_cost=2,
mask_loss_coefficient=1,
dice_loss_coefficient=1,
bbox_loss_coefficient=5,
giou_loss_coefficient=2,
eos_coefficient=0.1,
**kwargs,
):
# We default to values which were previously hard-coded in the model. This enables configurability of the config
# while keeping the default behavior the same.
if use_timm_backbone and backbone_kwargs is None:
backbone_kwargs = {}
if dilation:
backbone_kwargs["output_stride"] = 16
backbone_kwargs["out_indices"] = [1, 2, 3, 4]
backbone_kwargs["in_chans"] = num_channels
# Backwards compatibility
elif not use_timm_backbone and backbone in (None, "resnet50"):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
backbone = None
# set timm attributes to None
dilation = None
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.backbone_kwargs = backbone_kwargs
self.dilation = dilation
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
__all__ = ["DetrConfig"]
| DetrConfig |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 11184,
"end": 11676
} | class ____(ConcreteTemplate):
# Note Numba follows the Numpy semantics of returning a bool,
# while Python returns an int. This makes it consistent with
# np.invert() and makes array expressions correct.
cases = [signature(types.boolean, types.boolean)]
cases += [signature(choose_result_int(op), op) for op in sorted(types.unsigned_domain)]
cases += [signature(choose_result_int(op), op) for op in sorted(types.signed_domain)]
unsafe_casting = False
| BitwiseInvert |
python | pallets__flask | src/flask/sessions.py | {
"start": 556,
"end": 1524
} | class ____(MutableMapping[str, t.Any]):
"""Expands a basic dictionary with session attributes."""
@property
def permanent(self) -> bool:
"""This reflects the ``'_permanent'`` key in the dict."""
return self.get("_permanent", False) # type: ignore[no-any-return]
@permanent.setter
def permanent(self, value: bool) -> None:
self["_permanent"] = bool(value)
#: Some implementations can detect whether a session is newly
#: created, but that is not guaranteed. Use with caution. The mixin
# default is hard-coded ``False``.
new = False
#: Some implementations can detect changes to the session and set
#: this when that happens. The mixin default is hard coded to
#: ``True``.
modified = True
#: Some implementations can detect when session data is read or
#: written and set this when that happens. The mixin default is hard
#: coded to ``True``.
accessed = True
| SessionMixin |
python | lazyprogrammer__machine_learning_examples | rl2/a3c/worker.py | {
"start": 3167,
"end": 9023
} | class ____:
def __init__(
self,
name,
env,
policy_net,
value_net,
global_counter,
returns_list,
discount_factor=0.99,
max_global_steps=None):
self.name = name
self.env = env
self.global_policy_net = policy_net
self.global_value_net = value_net
self.global_counter = global_counter
self.discount_factor = discount_factor
self.max_global_steps = max_global_steps
self.global_step = tf.train.get_global_step()
self.img_transformer = ImageTransformer()
# Create local policy and value networks that belong only to this worker
with tf.variable_scope(name):
# self.policy_net = PolicyNetwork(num_outputs=policy_net.num_outputs)
# self.value_net = ValueNetwork()
self.policy_net, self.value_net = create_networks(policy_net.num_outputs)
# We will use this op to copy the global network weights
# back to the local policy and value networks
self.copy_params_op = get_copy_params_op(
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="global"),
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/'))
# These will take the gradients from the local networks
# and use those gradients to update the global network
self.vnet_train_op = make_train_op(self.value_net, self.global_value_net)
self.pnet_train_op = make_train_op(self.policy_net, self.global_policy_net)
self.state = None # Keep track of the current state
self.total_reward = 0. # After each episode print the total (sum of) reward
self.returns_list = returns_list # Global returns list to plot later
def run(self, sess, coord, t_max):
with sess.as_default(), sess.graph.as_default():
# Assign the initial state
self.state = repeat_frame(self.img_transformer.transform(self.env.reset()))
try:
while not coord.should_stop():
# Copy weights from global networks to local networks
sess.run(self.copy_params_op)
# Collect some experience
steps, global_step = self.run_n_steps(t_max, sess)
# Stop once the max number of global steps has been reached
if self.max_global_steps is not None and global_step >= self.max_global_steps:
coord.request_stop()
return
# Update the global networks using local gradients
self.update(steps, sess)
except tf.errors.CancelledError:
return
def sample_action(self, state, sess):
# Make input N x D (N = 1)
feed_dict = { self.policy_net.states: [state] }
actions = sess.run(self.policy_net.sample_action, feed_dict)
# Prediction is a 1-D array of length N, just want the first value
return actions[0]
def get_value_prediction(self, state, sess):
# Make input N x D (N = 1)
feed_dict = { self.value_net.states: [state] }
vhat = sess.run(self.value_net.vhat, feed_dict)
# Prediction is a 1-D array of length N, just want the first value
return vhat[0]
def run_n_steps(self, n, sess):
steps = []
for _ in range(n):
# Take a step
action = self.sample_action(self.state, sess)
next_frame, reward, done, _ = self.env.step(action)
# Shift the state to include the latest frame
next_state = shift_frames(self.state, self.img_transformer.transform(next_frame))
# Save total return
if done:
print("Total reward:", self.total_reward, "Worker:", self.name)
self.returns_list.append(self.total_reward)
if len(self.returns_list) > 0 and len(self.returns_list) % 100 == 0:
print("*** Total average reward (last 100):", np.mean(self.returns_list[-100:]), "Collected so far:", len(self.returns_list))
self.total_reward = 0.
else:
self.total_reward += reward
# Save step
step = Step(self.state, action, reward, next_state, done)
steps.append(step)
# Increase local and global counters
global_step = next(self.global_counter)
if done:
self.state = repeat_frame(self.img_transformer.transform(self.env.reset()))
break
else:
self.state = next_state
return steps, global_step
def update(self, steps, sess):
"""
Updates global policy and value networks using the local networks' gradients
"""
# In order to accumulate the total return
# We will use V_hat(s') to predict the future returns
# But we will use the actual rewards if we have them
# Ex. if we have s1, s2, s3 with rewards r1, r2, r3
# Then G(s3) = r3 + V(s4)
# G(s2) = r2 + r3 + V(s4)
# G(s1) = r1 + r2 + r3 + V(s4)
reward = 0.0
if not steps[-1].done:
reward = self.get_value_prediction(steps[-1].next_state, sess)
# Accumulate minibatch samples
states = []
advantages = []
value_targets = []
actions = []
# loop through steps in reverse order
for step in reversed(steps):
reward = step.reward + self.discount_factor * reward
advantage = reward - self.get_value_prediction(step.state, sess)
# Accumulate updates
states.append(step.state)
actions.append(step.action)
advantages.append(advantage)
value_targets.append(reward)
feed_dict = {
self.policy_net.states: np.array(states),
self.policy_net.advantage: advantages,
self.policy_net.actions: actions,
self.value_net.states: np.array(states),
self.value_net.targets: value_targets,
}
# Train the global estimators using local gradients
global_step, pnet_loss, vnet_loss, _, _ = sess.run([
self.global_step,
self.policy_net.loss,
self.value_net.loss,
self.pnet_train_op,
self.vnet_train_op,
], feed_dict)
# Theoretically could plot these later
return pnet_loss, vnet_loss
| Worker |
python | PyCQA__pylint | tests/functional/t/typing_generic.py | {
"start": 502,
"end": 591
} | class ____(ABC, Generic[Anything]):
def a_method(self) -> None:
print("hello")
| A |
python | apache__airflow | task-sdk/src/airflow/sdk/bases/decorator.py | {
"start": 26209,
"end": 28543
} | class ____(Protocol):
"""Type declaration for ``task_decorator_factory`` return type."""
@overload
def __call__( # type: ignore[misc]
self,
python_callable: Callable[FParams, FReturn],
) -> Task[FParams, FReturn]:
"""For the "bare decorator" ``@task`` case."""
@overload
def __call__(
self,
*,
multiple_outputs: bool | None = None,
**kwargs: Any,
) -> Callable[[Callable[FParams, FReturn]], Task[FParams, FReturn]]:
"""For the decorator factory ``@task()`` case."""
def override(self, **kwargs: Any) -> Task[FParams, FReturn]: ...
def task_decorator_factory(
python_callable: Callable | None = None,
*,
multiple_outputs: bool | None = None,
decorated_operator_class: type[BaseOperator],
**kwargs,
) -> TaskDecorator:
"""
Generate a wrapper that wraps a function into an Airflow operator.
Can be reused in a single Dag.
:param python_callable: Function to decorate.
:param multiple_outputs: If set to True, the decorated function's return
value will be unrolled to multiple XCom values. Dict will unroll to XCom
values with its keys as XCom keys. If set to False (default), only at
most one XCom value is pushed.
:param decorated_operator_class: The operator that executes the logic needed
to run the python function in the correct environment.
Other kwargs are directly forwarded to the underlying operator class when
it's instantiated.
"""
if multiple_outputs is None:
multiple_outputs = cast("bool", attr.NOTHING)
if python_callable:
decorator = _TaskDecorator(
function=python_callable,
multiple_outputs=multiple_outputs,
operator_class=decorated_operator_class,
kwargs=kwargs,
)
return cast("TaskDecorator", decorator)
if python_callable is not None:
raise TypeError("No args allowed while using @task, use kwargs instead")
def decorator_factory(python_callable):
return _TaskDecorator(
function=python_callable,
multiple_outputs=multiple_outputs,
operator_class=decorated_operator_class,
kwargs=kwargs,
)
return cast("TaskDecorator", decorator_factory)
| TaskDecorator |
python | pyca__cryptography | src/cryptography/fernet.py | {
"start": 596,
"end": 661
} | class ____(Exception):
pass
_MAX_CLOCK_SKEW = 60
| InvalidToken |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 101722,
"end": 102015
} | class ____(SCIMTestCase):
provider = ACTIVE_DIRECTORY_PROVIDER_NAME
@pytest.fixture(autouse=True)
def _use_dummy_provider_for_ad_provider(self) -> Generator[None]:
with mock.patch.object(auth.manager, "get", return_value=DummyProvider()):
yield
| SCIMAzureTestCase |
python | great-expectations__great_expectations | tests/scripts/test_public_api_report.py | {
"start": 27557,
"end": 28705
} | class ____:
def test_generate_printable_definitions(self, public_api_report: PublicAPIReport):
expected: List[str] = [
"File: sample_with_definitions_python_file_string.py Name: ExampleClass",
"File: sample_with_definitions_python_file_string.py Name: example_classmethod",
"File: sample_with_definitions_python_file_string.py Name: example_method",
"File: sample_with_definitions_python_file_string.py Name: example_method_with_args",
"File: sample_with_definitions_python_file_string.py Name: "
"example_module_level_function",
"File: sample_with_definitions_python_file_string.py Name: example_staticmethod",
]
observed = [str(p) for p in public_api_report.generate_printable_definitions()]
assert observed == expected
def test_generate_printable_definitions_exclude_by_file(
self, public_api_report_filter_out_file: PublicAPIReport
):
expected: List[str] = []
observed = public_api_report_filter_out_file.generate_printable_definitions()
assert observed == expected
| TestPublicAPIReport |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 18555,
"end": 18945
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"SAML_EXTERNAL_IDENTITY_MISSING",
"SAML_SSO_ENFORCEMENT_REQUIRES_EXTERNAL_IDENTITY",
"TWO_FACTOR_ACCOUNT_RECOVERY",
"TWO_FACTOR_REQUIREMENT_NON_COMPLIANCE",
"USER_ACCOUNT_DELETED",
)
| OrgRemoveMemberAuditEntryReason |
python | django__django | tests/admin_views/models.py | {
"start": 21565,
"end": 21690
} | class ____(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
| Report |
python | getsentry__sentry | src/sentry/api/endpoints/organization_trace_item_attributes.py | {
"start": 16446,
"end": 26151
} | class ____(BaseSpanFieldValuesAutocompletionExecutor):
def __init__(
self,
organization: Organization,
snuba_params: SnubaParams,
key: str,
query: str | None,
limit: int,
offset: int,
definitions: ColumnDefinitions,
):
super().__init__(organization, snuba_params, key, query, limit)
self.limit = limit
self.offset = offset
self.resolver = SearchResolver(
params=snuba_params, config=SearchResolverConfig(), definitions=definitions
)
self.search_type, self.attribute_key, self.context_definition = self.resolve_attribute_key(
key
)
self.autocomplete_function: dict[str, Callable[[], list[TagValue]]] = (
{key: self.project_id_autocomplete_function for key in self.PROJECT_ID_KEYS}
| {key: self.project_slug_autocomplete_function for key in self.PROJECT_SLUG_KEYS}
| {
RELEASE_STAGE_ALIAS: self.release_stage_autocomplete_function,
SEMVER_ALIAS: self.semver_autocomplete_function,
SEMVER_BUILD_ALIAS: self.semver_build_autocomplete_function,
SEMVER_PACKAGE_ALIAS: self.semver_package_autocomplete_function,
"timestamp": self.skip_autocomplete,
}
)
def resolve_attribute_key(
self, key: str
) -> tuple[constants.SearchType, AttributeKey, VirtualColumnDefinition | None]:
resolved_attr, context_definition = self.resolver.resolve_attribute(key)
if context_definition:
resolved_attr = self.resolver.map_context_to_original_column(context_definition)
return (
resolved_attr.search_type,
resolved_attr.proto_definition,
context_definition,
)
def execute(self) -> list[TagValue]:
func = self.autocomplete_function.get(self.key)
if func is not None:
return func()
if self.search_type == "boolean":
return self.boolean_autocomplete_function()
if self.search_type == "string":
return self.string_autocomplete_function()
return []
def release_stage_autocomplete_function(self):
return [
TagValue(
key=self.key,
value=stage.value,
times_seen=None,
first_seen=None,
last_seen=None,
)
for stage in ReleaseStages
if not self.query or self.query in stage.value
]
def semver_autocomplete_function(self):
versions = Release.objects.filter(version__contains="@" + self.query)
project_ids = self.snuba_params.project_ids
if project_ids:
release_projects = ReleaseProject.objects.filter(project_id__in=project_ids)
versions = versions.filter(id__in=release_projects.values_list("release_id", flat=True))
environment_ids = self.snuba_params.environment_ids
if environment_ids:
release_environments = ReleaseEnvironment.objects.filter(
environment_id__in=environment_ids
)
versions = versions.filter(
id__in=release_environments.values_list("release_id", flat=True)
)
order_by = map(_flip_field_sort, Release.SEMVER_COLS + ["package"])
versions = versions.filter_to_semver() # type: ignore[attr-defined] # mypy doesn't know about ReleaseQuerySet
versions = versions.annotate_prerelease_column()
versions = versions.order_by(*order_by)
seen = set()
formatted_versions = []
# We want to format versions here in a way that makes sense for autocomplete. So we
# - Only include package if we think the user entered a package
# - Exclude build number, since it's not used as part of filtering
# When we don't include package, this can result in duplicate version numbers, so we
# also de-dupe here. This can result in less than 1000 versions returned, but we
# typically use very few values so this works ok.
for version in versions.values_list("version", flat=True)[:1000]:
formatted_version = version.split("@", 1)[1]
formatted_version = formatted_version.split("+", 1)[0]
if formatted_version in seen:
continue
seen.add(formatted_version)
formatted_versions.append(
TagValue(
key=self.key,
value=formatted_version,
times_seen=None,
first_seen=None,
last_seen=None,
)
)
return formatted_versions
def semver_build_autocomplete_function(self):
build = self.query if self.query else ""
if not build.endswith("*"):
build += "*"
organization_id = self.snuba_params.organization_id
assert organization_id is not None
versions = Release.objects.filter_by_semver_build(
organization_id,
"exact",
build,
self.snuba_params.project_ids,
)
environment_ids = self.snuba_params.environment_ids
if environment_ids:
release_environments = ReleaseEnvironment.objects.filter(
environment_id__in=environment_ids
)
versions = versions.filter(
id__in=release_environments.values_list("release_id", flat=True)
)
builds = (
versions.values_list("build_code", flat=True).distinct().order_by("build_code")[:1000]
)
return [
TagValue(
key=self.key,
value=build,
times_seen=None,
first_seen=None,
last_seen=None,
)
for build in builds
]
def semver_package_autocomplete_function(self):
packages = (
Release.objects.filter(
organization_id=self.snuba_params.organization_id,
package__startswith=self.query,
)
.values_list("package")
.distinct()
)
versions = Release.objects.filter(
organization_id=self.snuba_params.organization_id,
package__in=packages,
id__in=ReleaseProject.objects.filter(
project_id__in=self.snuba_params.project_ids
).values_list("release_id", flat=True),
).annotate_prerelease_column() # type: ignore[attr-defined] # mypy doesn't know about ReleaseQuerySet
environment_ids = self.snuba_params.environment_ids
if environment_ids:
release_environments = ReleaseEnvironment.objects.filter(
environment_id__in=environment_ids
)
versions = versions.filter(
id__in=release_environments.values_list("release_id", flat=True)
)
packages = versions.values_list("package", flat=True).distinct().order_by("package")[:1000]
return [
TagValue(
key=self.key,
value=package,
times_seen=None,
first_seen=None,
last_seen=None,
)
for package in packages
]
def skip_autocomplete(self) -> list[TagValue]:
return []
def boolean_autocomplete_function(self) -> list[TagValue]:
return [
TagValue(
key=self.key,
value="false",
times_seen=None,
first_seen=None,
last_seen=None,
),
TagValue(
key=self.key,
value="true",
times_seen=None,
first_seen=None,
last_seen=None,
),
]
def string_autocomplete_function(self) -> list[TagValue]:
adjusted_start_date, adjusted_end_date = adjust_start_end_window(
self.snuba_params.start_date, self.snuba_params.end_date
)
start_timestamp = Timestamp()
start_timestamp.FromDatetime(adjusted_start_date)
end_timestamp = Timestamp()
end_timestamp.FromDatetime(adjusted_end_date)
query = translate_escape_sequences(self.query)
meta = self.resolver.resolve_meta(referrer=Referrer.API_SPANS_TAG_VALUES_RPC.value)
rpc_request = TraceItemAttributeValuesRequest(
meta=meta,
key=self.attribute_key,
value_substring_match=query,
limit=self.limit,
page_token=PageToken(offset=self.offset),
)
rpc_response = snuba_rpc.attribute_values_rpc(rpc_request)
values: Sequence[str] = rpc_response.values
if self.context_definition:
context = self.context_definition.constructor(self.snuba_params)
values = [context.value_map.get(value, value) for value in values]
return [
TagValue(
key=self.key,
value=value,
times_seen=None,
first_seen=None,
last_seen=None,
)
for value in values
if value
]
def adjust_start_end_window(start_date: datetime, end_date: datetime) -> tuple[datetime, datetime]:
start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
return start_date, end_date
| TraceItemAttributeValuesAutocompletionExecutor |
python | sympy__sympy | doc/ext/numpydoc.py | {
"start": 5300,
"end": 6196
} | class ____(ManglingDomainBase, CDomain):
name = 'np-c'
directive_mangling_map = {
'function': 'function',
'member': 'attribute',
'macro': 'function',
'type': 'class',
'var': 'object',
}
def wrap_mangling_directive(base_directive, objtype):
class directive(base_directive):
def run(self):
env = self.state.document.settings.env
name = None
if self.arguments:
m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
name = m.group(2).strip()
if not name:
name = self.arguments[0]
lines = list(self.content)
mangle_docstrings(env.app, objtype, name, None, None, lines)
self.content = ViewList(lines, self.content.parent)
return base_directive.run(self)
return directive
| NumpyCDomain |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 272689,
"end": 295313
} | class ____(ExternKernelAlloc):
"""
A class that represents a fallback kernel for handling operators that are not
directly support by inductor. It currently supports functional ops, view ops,
inplace aten ops, and mutating ops that are auto-functionalizable.
"""
def __init__(
self,
layout: OutputSpec,
kernel: _OpOverloads,
tensor_args: Sequence[IRNode],
nontensor_args: Sequence[Any],
unflatten_args: Callable[..., Any],
kwargs: Optional[dict[str, Any]] = None,
*,
unbacked_bindings: Optional[dict[sympy.Symbol, pytree.KeyPath]] = None,
) -> None:
super().__init__(
layout,
tuple(tensor_args),
tuple(nontensor_args),
op_overload=kernel,
)
self.use_runtime_dispatch = False
self.unbacked_bindings = unbacked_bindings or {}
assert isinstance(
kernel, (torch._ops.OpOverload, torch._ops.HigherOrderOperator)
), f"Fails to create FallbackKernel for {kernel}: {type(kernel)} not supported"
self.op_overload = kernel
self.unflatten_args = unflatten_args
self.kwargs = {} if kwargs is None else kwargs
assert self.python_kernel_name is not None
V.graph.warn_fallback(self.python_kernel_name)
# args that are aliased
self.alias_names: list[str] = []
# args that are mutated AND returned from the op
self.mutation_names: list[str] = []
if isinstance(self.op_overload, torch._ops.HigherOrderOperator):
# We assume here that HOPs with FallbackKernel are functional.
# This may not always be true! HOPs must individually opt-in to
# FallbackKernel, so please check this if you opt-in.
return
if "_c10d_functional" in self.op_overload.name():
# _c10d_functional kernels are lowered into _CollectiveKernel which
# derives from FallbackKernel for the cpp codegen. The kernels
# don't pass the can_auto_functionalize check, but their mutation
# is handled properly by _CollectiveKernel.
return
schema = self.op_overload._schema
# NOTE: [FallbackKernel supported operators]
# We only support three types of operators:
# - functional ops
# - view ops
# - inplace aten ops
# - mutating ops that are auto-functionalizable. That is,
# the operator may mutate any number of inputs, but its outputs
# may not alias any of the inputs.
#
# The unsupported cases usually do not show up here (because
# AOTAutograd functionalized them away); the only way for an in-place
# op to show up here is if a lowering or pass introduced it.
if torch._library.utils.mutates_and_returns_first_arg(self.op_overload):
self.mutation_names.append(tensor_args[0].get_name())
return
if schema.is_mutable and not can_auto_functionalize(kernel):
raise NotImplementedError(
f"NYI: Can't generate FallbackKernel for {kernel}"
)
args, kwargs = self.unflatten_args(self.inputs, self.constant_args)
def handle_aliasing_and_mutation(info: torch._C.Argument, arg: Any) -> None:
# Assertions to make sure we didn't mismatch args
if isinstance(info.type, torch.ListType):
assert isinstance(arg, (list, tuple)), type(arg)
if library_utils.is_tensor_like_type(info.type):
# PyTorch also accepts None and scalar types for args marked as "Tensor".
# We're not going to check all of them here.
assert not isinstance(arg, (tuple, list))
if arg is None:
return
if info.alias_info is None:
return
def add_alias(t: IRNode) -> None:
self.alias_names.append(t.get_name())
assert info.alias_info is not None
if info.alias_info.is_write:
self.mutation_outputs.append(
MutationOutput(NoneLayout(device=t.get_device()), t, self)
)
if library_utils.is_tensorlist_like_type(info.type):
if arg is not None:
for optional_tensor_arg in arg:
add_alias(optional_tensor_arg)
else:
assert library_utils.is_tensor_like_type(info.type)
# pyrefly: ignore [bad-argument-type]
add_alias(arg)
for info, arg in torch._library.utils.zip_schema(schema, args, kwargs):
handle_aliasing_and_mutation(info, arg)
def get_read_writes(self) -> dependencies.ReadWrites:
read_writes = super().get_read_writes()
if self.op_overload is torch._prims.rng_prims.graphsafe_run_with_rng_state:
for arg in self.constant_args:
if isinstance(arg, GeneratorState):
read_writes = read_writes.with_read(
dependencies.StarDep(arg.get_name())
)
return read_writes
def codegen_unbacked_symbol_defs(self, wrapper: PythonWrapperCodegen) -> None:
return wrapper.codegen_unbacked_symbol_defs_for_outputs(
self.get_name(), self.outputs, getattr(self, "unbacked_bindings", None)
)
def get_unbacked_symbol_defs(self) -> Container[sympy.Symbol]: # type: ignore[override]
if unbacked_bindings := getattr(self, "unbacked_bindings", None):
resolved = resolve_unbacked_bindings(
V.graph.sizevars.shape_env, unbacked_bindings
)
assert resolved is not None
return resolved.keys()
else:
return OrderedSet()
def codegen_args(self) -> list[str]:
@dataclasses.dataclass
class Shim:
ref: Any
def __repr__(self) -> str:
return self.ref
assert is_node_sequence(self.inputs)
tensor_args = [Shim(x.codegen_reference()) for x in self.inputs]
args, kwargs = self.unflatten_args(tensor_args, self.constant_args)
if V.graph.cpp_wrapper and isinstance(self.op_overload, torch._ops.OpOverload):
args = self.fill_non_provided_args(args, kwargs)
args = [
V.graph.wrapper_code.val_to_arg_str(x, param.real_type)
for param, x in zip(self.op_overload._schema.arguments, args)
]
else:
args = [V.graph.wrapper_code.val_to_arg_str(x) for x in args]
# let self.codegen_kwargs handle kwargs
self.kwargs.update(kwargs)
return args
@staticmethod
def find_device(
tensor_args: Optional[Sequence[torch.Tensor]], example_output: Sequence[Any]
) -> Any:
non_torch_bind_tensor_args = (
[t for t in tensor_args if not isinstance(t, TorchBindObject)]
if tensor_args
else None
)
if non_torch_bind_tensor_args:
assert tensor_args
devices = [arg.get_device() for arg in tensor_args if arg.get_device()]
return devices[0]
if isinstance(example_output, torch.Tensor):
return example_output.device
if isinstance(example_output, (list, tuple)):
device_set = OrderedSet(
FallbackKernel.find_device(None, x) for x in example_output
)
# Remove None
devices = [device for device in device_set if device]
if len(devices) == 1:
return devices[0]
for device in devices:
assert isinstance(device, torch.device)
if is_gpu(device.type):
return device
return devices[0]
return None
def has_side_effects(self) -> bool:
if isinstance(self.op_overload, torch._ops.HigherOrderOperator):
return False
return get_schema_info(self.op_overload).is_mutable()
def get_inputs_that_alias_output(self) -> Sequence[str]:
assert isinstance(
self.op_overload, (torch._ops.OpOverload, torch._ops.HigherOrderOperator)
), (
f"Fails to create FallbackKernel for {self.op_overload}: "
f"{type(self.op_overload)} not supported"
)
# See [Note: FallbackKernel supported operators]: for a mutating
# op that is auto-functionalizable, its outputs does NOT
# alias any of the inputs.
if (
not isinstance(self.op_overload, torch._ops.HigherOrderOperator)
and "_c10d_functional" not in self.op_overload.name()
and self.op_overload._schema.is_mutable
and can_auto_functionalize(self.op_overload)
):
return []
else:
return self.alias_names
def get_mutation_names(self) -> Sequence[str]:
assert len(self.mutation_names) <= 1
return self.mutation_names
def export_extern_kernel_node(self): # type: ignore[no-untyped-def]
"""
ProxyExecutor Design Note
We export the ExternFallbackNodes (for custom ops) into a serialized file
and run it with a host side proxy executor to address the ABI problem
This is currently only implemented for fbcode. Eventually, we will also make this work for OSS.
Detailed design doc can be found at
https://docs.google.com/document/d/1wC4DOZFaYym2t1Esz0X5yxlLI3RDnSiyRbUus3bkJ64/edit?usp=sharing
"""
log.debug(
"Extern kernel node added for node %s with target %s.",
self.get_name(),
self.op_overload,
)
assert isinstance(self, FallbackKernel), type(self)
args, kwargs = self.unflatten_args(self.inputs, self.constant_args)
args = self.fill_non_provided_args(args, kwargs)
ordered_kwargs = [
self.get_kwargs_value(key, **kwargs)
for key in self.ordered_kwargs_for_cpp_kernel
]
target = self.op_overload
if not V.graph.aot_mode:
# No need to serialize in the cpp wrapper JIT mode
return [*args, *ordered_kwargs]
serializer = GraphModuleSerializer(None, []) # type: ignore[arg-type]
named_arguments = serializer.serialize_inputs(target, args, kwargs)
# serialize_outputs
def handle_single_output(
return_type: Union[torch.TensorType, torch.ListType, torch.JitType],
output: Union[IRNode, Sequence[IRNode]],
) -> export_schema.Argument:
if isinstance(return_type, (torch.TensorType, torch.NoneType)):
# For single Tensor or None
out = output
if isinstance(output, (list, tuple)):
assert len(output) == 1
out = output[0]
if isinstance(return_type, torch.TensorType):
assert isinstance(out, IRNode)
return export_schema.Argument.create(
as_tensor=export_schema.TensorArgument(name=out.get_name())
)
else: # NoneType
assert out is None
return export_schema.Argument.create(as_none=True)
elif isinstance(return_type, torch.ListType) and isinstance(
return_type.getElementType(), torch.TensorType
):
assert isinstance(output, Sequence), type(output)
# For single TensorList
return export_schema.Argument.create(
as_tensors=[
export_schema.TensorArgument(name=out.get_name())
for out in output
]
)
elif isinstance(return_type, torch.OptionalType) and isinstance(
return_type.getElementType(), torch.TensorType
):
# For OptionalTensor
if output is None:
return export_schema.Argument.create(
as_optional_tensor=export_schema.OptionalTensorArgument.create(
as_none=True
)
)
else:
assert isinstance(output, IRNode)
return export_schema.Argument.create(
as_optional_tensor=export_schema.OptionalTensorArgument.create(
as_tensor=export_schema.TensorArgument(
name=output.get_name()
)
)
)
elif isinstance(return_type, torch.IntType):
return export_schema.Argument.create(as_int=output)
else:
raise RuntimeError(f"Unsupported return type {type(return_type)}")
if isinstance(target, torch._higher_order_ops.torchbind.CallTorchBind):
returns = target.schema(args[0], args[1]).returns
else:
returns = target._schema.returns # type: ignore[union-attr]
if len(returns) == 1:
# NOTE: [special handling of all_reduce_coalesced_'s return value]
# all_reduce_coalesced_ return a list of tensors via self.mutation_outputs
outputs = self.outputs if self.outputs else self.mutation_outputs
return_type = returns[0].real_type
output_arguments = [handle_single_output(return_type, outputs)]
else:
# For tuple returns, e.g "-> (Tensor, Tensor)" or "-> (Tesnor, Tensor[])"
# Not generating output args for self.mutation_outputs
output_arguments = [
handle_single_output(
return_schema.real_type, # type: ignore[attr-defined]
output,
)
for return_schema, output in zip(returns, self.outputs)
]
assert self.op_overload is not None
node = ExternKernelNode(
name=self.get_name(),
node=export_schema.Node(
target=self.op_overload.name(),
inputs=named_arguments,
outputs=output_arguments,
metadata={},
),
)
V.extern_kernel_nodes.append(node)
return [*args, *ordered_kwargs]
@override
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
"""Overrides the parent member.
See https://github.com/pytorch/pytorch/issues/151692"""
kernel = self.op_overload
assert kernel is not None
if kernel.namespace == "aten":
# Aten Fallback Ops
assert isinstance(kernel, torch._ops.OpOverload), type(kernel)
if V.graph.cpp_wrapper:
from torchgen.aoti.fallback_ops import inductor_fallback_ops
if str(kernel) not in inductor_fallback_ops:
# C shim v2 is torchgen-ed, which should cover all aten ops.
# If you do hit a missed op, please update fallback_ops.py.
log.warning(
"%s is missing a c-shim implementation, using proxy executor as fallback",
kernel,
)
self.use_runtime_dispatch = True
elif kernel.namespace == "_quantized":
# Internal Quantized Fallback Ops
assert isinstance(kernel, torch._ops.OpOverload), type(kernel)
elif V.graph.cpp_wrapper:
# For non-aten OpOverload, i.e. custom ops
# If the op is in custom_ops_to_c_shims, generate direct function call
self.use_runtime_dispatch = (
kernel not in config.aot_inductor.custom_ops_to_c_shims
)
# Handle the special case where a complex number is input to a C-shim kernel for
# a scalar input. The torchgen'ed shim API will use type "double", which is
# incompatible with complex numbers, forcing a fallback to runtime dispatch.
if (
V.graph.cpp_wrapper
and isinstance(kernel, torch._ops.OpOverload)
and not self.use_runtime_dispatch
):
def is_number(t: torch.JitType) -> bool:
if isinstance(t, torch.OptionalType):
return is_number(t.getElementType())
return isinstance(t, torch.NumberType)
# Using unflatten_args is a bit of a hack, but all the complex arguments we
# care about are in self.constant_args, and calling unflatten_args puts them
# in the correct order without triggering codegen.
args, kwargs = self.unflatten_args(self.inputs, self.constant_args)
# Append kwarg values to args. ordered_kwargs_for_cpp_kernel is guaranteed
# to be set, since this is an OpOverload kernel.
args_iter = itertools.chain(
args,
(
self.get_kwargs_value(k, **kwargs)
for k in self.ordered_kwargs_for_cpp_kernel
),
)
self.use_runtime_dispatch = any(
isinstance(v, complex) and is_number(a.real_type)
for v, a in zip(args_iter, kernel._schema.arguments)
)
self.codegen_comment(wrapper)
if self.use_runtime_dispatch:
exported_args = self.export_extern_kernel_node()
assert self.python_kernel_name is not None
assert self.op_overload is not None
wrapper.generate_fallback_kernel_with_runtime_lookup(
self.get_name(),
self.python_kernel_name,
lambda: [*self.codegen_args(), *self.codegen_kwargs()],
self.op_overload,
exported_args,
# NOTE: [special handling of all_reduce_coalesced_'s return value]
self.outputs if self.outputs else self.mutation_outputs,
)
else:
wrapper.generate_fallback_kernel(self)
if isinstance(self.layout, Layout):
self.codegen_size_asserts(wrapper)
self.codegen_alignment_asserts(wrapper)
self.codegen_memory_tracking(wrapper)
self.codegen_unbacked_symbol_defs(wrapper)
@staticmethod
def tensor_to_layout(output: torch.Tensor) -> FixedLayout:
is_pinned = False
try:
is_pinned = output.is_pinned()
except RuntimeError:
# dispatch not implemented
pass
return FixedLayout(
output.device,
output.dtype,
convert_shape_to_inductor(output.size()),
convert_shape_to_inductor(output.stride()),
is_pinned=is_pinned,
)
@classmethod
def create(cls, kernel: _OpOverloads, *args: Any, **kwargs: Any) -> FallbackKernel:
"""Create an instance of FallbackKernel from an _OpOverloads"""
fake_incorrect_kernels = (aten._fused_moving_avg_obs_fq_helper_functional,)
if kernel not in fake_incorrect_kernels:
context = cast(AbstractContextManager[None], V.graph.fake_mode)
else:
context = nullcontext()
with context:
(
example_output,
tensor_args,
non_tensor_args,
unflatten_args,
unbacked_bindings,
) = cls.process_kernel(kernel, *args, **kwargs)
# We need this extra check for input alignment since the example
# inputs we created are always aligned.
has_unaligned_input = any(is_unaligned(arg) for arg in tensor_args)
device = cls.find_device(tensor_args, example_output)
if not device and isinstance(
kernel, torch._higher_order_ops.torchbind.CallTorchBind
):
# use CPU device for torchbind methods that don't take in or output any tensor, e.g. size()
device = torch.device("cpu")
if example_output is None:
packed = cls(
NoneLayout(device=device),
kernel,
tensor_args,
non_tensor_args,
unflatten_args,
unbacked_bindings=unbacked_bindings,
)
else:
assert device, "Not sure where to find device info"
packed = cls(
MultiOutputLayout(device=device),
kernel,
tensor_args,
non_tensor_args,
unflatten_args,
unbacked_bindings=unbacked_bindings,
)
def generate_output(output: Any, indices: list[tuple[Any, int]]) -> Any:
if isinstance(output, (list, tuple)):
return type(output)(
generate_output(output[i], indices + [(type(output), i)])
for i in range(len(output))
)
elif isinstance(output, dict):
return {
key: generate_output(val, indices + [(type(output), key)])
for key, val in output.items()
}
elif isinstance(output, torch.Tensor):
buf = MultiOutput(
cls.tensor_to_layout(output),
packed,
indices,
)
if (
config.assume_unaligned_fallback_output
or has_unaligned_input
or not tensor_is_aligned(output)
):
V.graph.unaligned_buffers.add(buf.name) # type: ignore[arg-type]
return buf
elif isinstance(output, int):
return output
elif isinstance(output, torch.SymInt):
return output.node.expr
else:
assert output is None, (
f"FallbackKernel output type {type(output)} is not supported"
)
return None
outputs = generate_output(example_output, [])
if isinstance(outputs, (list, tuple)):
packed.outputs = outputs
elif isinstance(outputs, dict):
packed.outputs = tuple(outputs)
else:
packed.outputs = [outputs]
# pyrefly: ignore [bad-return]
return outputs
@ir_dataclass(frozen=False)
| FallbackKernel |
python | pyparsing__pyparsing | tests/test_unit.py | {
"start": 402077,
"end": 404520
} | class ____(unittest.TestCase):
def test_loads_markdown_file(self):
# Mock the file read to simulate the Markdown content
mock_content = "## Test Best Practices\n- Example guideline"
mock_file = mock_open(read_data=mock_content)
with patch("importlib.resources.files") as mock_files:
# mock path.open() to use our mock_file
mock_path = mock_files.return_value.joinpath.return_value
mock_path.open = mock_file
result = pp.show_best_practices(file=None)
self.assertEqual(result, mock_content)
def test_fallback_when_file_missing(self):
# Patch files().joinpath().open to raise FileNotFoundError
with patch("importlib.resources.files") as mock_files:
mock_path = mock_files.return_value.joinpath.return_value
mock_path.open.side_effect = FileNotFoundError
result = pp.show_best_practices(file=None)
self.assertIn("## Planning", result) # Fallback contains "Planning" section
self.assertIn("## Implementing", result)
self.assertIn("## Testing", result)
self.assertIn("## Debugging", result)
def test_cli_invocation_with_module_flag(self):
# Invoke the CLI the same way a user or AI would:
# python -m pyparsing.show_best_practices
cmd = [sys.executable, "-m", "pyparsing.ai.show_best_practices"]
subproc = subprocess.run(cmd, capture_output=True, text=True)
self.assertEqual(subproc.returncode, 0, msg=f"stderr: {subproc.stderr}")
subproc_stdout = subproc.stdout
# Should print the best practices markdown (either from file or fallback)
self.assertIn("## Planning", subproc_stdout)
self.assertIn("## Implementing", subproc_stdout)
self.assertIn("## Testing", subproc_stdout)
self.assertIn("## Debugging", subproc_stdout)
# force clear of packrat parsing flags before saving contexts
pp.ParserElement._packratEnabled = False
pp.ParserElement._parse = pp.ParserElement._parseNoCache # noqa
Test02_WithoutPackrat.suite_context = ppt.reset_pyparsing_context().save()
Test02_WithoutPackrat.save_suite_context = ppt.reset_pyparsing_context().save()
default_suite_context = ppt.reset_pyparsing_context().save()
pp.ParserElement.enable_left_recursion()
recursion_suite_context = ppt.reset_pyparsing_context().save()
default_suite_context.restore()
| TestShowBestPractices |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/glue.py | {
"start": 12947,
"end": 16670
} | class ____(AwsBaseOperator[GlueDataQualityHook]):
"""
Creates a data quality ruleset with DQDL rules applied to a specified Glue table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlueDataQualityOperator`
:param name: A unique name for the data quality ruleset.
:param ruleset: A Data Quality Definition Language (DQDL) ruleset.
For more information, see the Glue developer guide.
:param description: A description of the data quality ruleset.
:param update_rule_set: To update existing ruleset, Set this flag to True. (default: False)
:param data_quality_ruleset_kwargs: Extra arguments for RuleSet.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = GlueDataQualityHook
template_fields: Sequence[str] = aws_template_fields(
"name", "ruleset", "description", "data_quality_ruleset_kwargs"
)
template_fields_renderers = {
"data_quality_ruleset_kwargs": "json",
}
ui_color = "#ededed"
def __init__(
self,
*,
name: str,
ruleset: str,
description: str = "AWS Glue Data Quality Rule Set With Airflow",
update_rule_set: bool = False,
data_quality_ruleset_kwargs: dict | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.name = name
self.ruleset = ruleset.strip()
self.description = description
self.update_rule_set = update_rule_set
self.data_quality_ruleset_kwargs = data_quality_ruleset_kwargs or {}
def validate_inputs(self) -> None:
if not self.ruleset.startswith("Rules") or not self.ruleset.endswith("]"):
raise AttributeError("RuleSet must starts with Rules = [ and ends with ]")
if self.data_quality_ruleset_kwargs.get("TargetTable"):
target_table = self.data_quality_ruleset_kwargs["TargetTable"]
if not target_table.get("TableName") or not target_table.get("DatabaseName"):
raise AttributeError("Target table must have DatabaseName and TableName")
def execute(self, context: Context):
self.validate_inputs()
config = {
"Name": self.name,
"Ruleset": self.ruleset,
"Description": self.description,
**self.data_quality_ruleset_kwargs,
}
try:
if self.update_rule_set:
self.hook.conn.update_data_quality_ruleset(**config)
self.log.info("AWS Glue data quality ruleset updated successfully")
else:
self.hook.conn.create_data_quality_ruleset(**config)
self.log.info("AWS Glue data quality ruleset created successfully")
except ClientError as error:
raise AirflowException(
f"AWS Glue data quality ruleset failed: {error.response['Error']['Message']}"
)
| GlueDataQualityOperator |
python | TheAlgorithms__Python | graphs/edmonds_karp_multiple_source_and_sink.py | {
"start": 2542,
"end": 2939
} | class ____(FlowNetworkAlgorithmExecutor):
def __init__(self, flow_network):
super().__init__(flow_network)
# use this to save your result
self.maximum_flow = -1
def get_maximum_flow(self):
if not self.executed:
raise Exception("You should execute algorithm before using its result!")
return self.maximum_flow
| MaximumFlowAlgorithmExecutor |
python | huggingface__transformers | src/transformers/models/ovis2/image_processing_ovis2.py | {
"start": 7685,
"end": 28908
} | class ____(BaseImageProcessor):
r"""
Constructs a Ovis2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
crop_to_patches (`bool`, *optional*, defaults to `False`):
Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the
`preprocess` method.
min_patches (`int`, *optional*, defaults to 1):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method.
max_patches (`int`, *optional*, defaults to 12):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
use_covering_area_grid (`bool`, *optional*, defaults to `True`):
Whether to use the covering area grid to determine the number of patches. Only has an effect if
`crop_to_patches` is set to `True`. Can be overridden by the `use_covering_area_grid` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = Ovis2ImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
crop_to_patches: bool = False,
min_patches: int = 1,
max_patches: int = 12,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
use_covering_area_grid: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 384, "width": 384}
size = get_size_dict(size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.crop_to_patches = crop_to_patches
self.min_patches = min_patches
self.max_patches = max_patches
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
crop_to_patches: Optional[bool] = None,
min_patches: Optional[int] = None,
max_patches: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
do_convert_rgb: Optional[bool] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
use_covering_area_grid: bool = True,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
crop_to_patches (`bool`, *optional*, defaults to `self.crop_to_patches`):
Whether to crop the image to patches.
min_patches (`int`, *optional*, defaults to `self.min_patches`):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`.
max_patches (`int`, *optional*, defaults to `self.max_patches`):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
use_covering_area_grid (`bool`, *optional*, defaults to `True`):
Whether to use the covering area grid to determine the number of patches. Only has an effect if
`crop_to_patches` is set to `True`.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
crop_to_patches = crop_to_patches if crop_to_patches is not None else self.crop_to_patches
min_patches = min_patches if min_patches is not None else self.min_patches
max_patches = max_patches if max_patches is not None else self.max_patches
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if crop_to_patches and max_patches > 1:
images = [
self.crop_image_to_patches(
image,
min_patches=min_patches,
max_patches=max_patches,
patch_size=size,
data_format=input_data_format,
use_covering_area_grid=use_covering_area_grid,
)
for image in images
]
grids = [grid for _, grid in images]
images = [image for images_list, _ in images for image in images_list]
else:
grids = [(1, 1)] * len(images)
for i, image in enumerate(images):
if do_resize:
images[i] = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
images[i] = self.rescale(image=images[i], scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
images[i] = self.normalize(
image=images[i],
mean=image_mean,
std=image_std,
input_data_format=input_data_format,
)
images[i] = to_channel_dimension_format(images[i], data_format, input_channel_dim=input_data_format)
encoded_outputs = BatchFeature(data={"pixel_values": images, "grids": grids}, tensor_type=return_tensors)
return encoded_outputs
def crop_image_to_patches(
self,
images: np.ndarray,
min_patches: int,
max_patches: int,
use_covering_area_grid: bool = True,
patch_size: Optional[Union[tuple, int, dict]] = None,
data_format: Optional[ChannelDimension] = None,
covering_threshold: float = 0.9,
):
"""
Crop the image to patches and return a list of cropped images.
The number of patches and their grid arrangement are determined by the original image size,
the target patch size and the minimum and maximum number of patches.
The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.
Args:
images (`np.ndarray`):
The image to be cropped.
min_patches (`int`):
The minimum number of patches to be extracted from the image.
max_patches (`int`):
The maximum number of patches to be extracted from the image.
use_covering_area_grid (`bool`, *optional*, defaults to `True`):
Whether to use the covering area grid to determine the number of patches.
patch_size (`int`, `Tuple[int, int]`, `dict`, *optional*):
The size of the output patches.
data_format (`ChannelDimension`, *optional*):
The format of the image data. If `None`, the format is inferred from the input image.
covering_threshold (`float`, *optional*, defaults to `0.9`):
The threshold for the covering area grid. If the covering area is less than this value, the grid is
considered invalid.
Returns:
List[`PIL.Image.Image`] or List[np.ndarray]: The list of cropped images.
"""
if data_format is None:
data_format = infer_channel_dimension_format(images)
images = to_channel_dimension_format(images, ChannelDimension.FIRST, data_format)
patch_size_height, patch_size_width = patch_size["height"], patch_size["width"]
original_height, original_width = images.shape[-2:]
if use_covering_area_grid:
# Use the original OVIS2 approach: compute the minimal number of tiles that cover at least 90% of the image area
num_columns, num_rows = get_min_tile_covering_grid(
(original_height, original_width),
target_patch_size=patch_size_height, # square patch size
max_image_tiles=max_patches,
covering_threshold=covering_threshold,
)
else:
# find the closest aspect ratio to the target
num_columns, num_rows = get_optimal_tiled_canvas(
(original_height, original_width),
(patch_size_height, patch_size_width),
min_patches,
max_patches,
)
# calculate the target width and height
target_width = patch_size_width * num_columns
target_height = patch_size_height * num_rows
num_blocks = num_columns * num_rows
# resize the image so that each patch is of patch_size
resized_image = self.resize(
images,
{"height": target_height, "width": target_width},
data_format=ChannelDimension.FIRST,
input_data_format=ChannelDimension.FIRST,
)
# split the image into patches
processed_images = []
for i in range(num_blocks):
column = i % num_columns
row = i // num_columns
box = (
column * patch_size_width,
row * patch_size_height,
(column + 1) * patch_size_width,
(row + 1) * patch_size_height,
)
# split the image
patch_image = resized_image[..., box[1] : box[3], box[0] : box[2]]
patch_image = to_channel_dimension_format(patch_image, data_format, ChannelDimension.FIRST)
processed_images.append(patch_image)
if len(processed_images) != 1:
thumbnail_img = self.resize(
images, patch_size, data_format=data_format, input_data_format=ChannelDimension.FIRST
)
processed_images.insert(0, thumbnail_img)
return processed_images, (num_rows, num_columns)
__all__ = ["Ovis2ImageProcessor"]
| Ovis2ImageProcessor |
python | scikit-learn__scikit-learn | sklearn/model_selection/_split.py | {
"start": 17865,
"end": 23712
} | class ____(GroupsConsumerMixin, _BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
Each group will appear exactly once in the test set across all folds (the
number of distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
samples is approximately the same in each test fold when `shuffle` is True.
Read more in the :ref:`User Guide <group_k_fold>`.
For visualisation of cross-validation behaviour and
comparison between common scikit-learn split methods
refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py`
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle the groups before splitting into batches.
Note that the samples within each split will not be shuffled.
.. versionadded:: 1.6
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold. Otherwise, this
parameter has no effect.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 1.6
Notes
-----
Groups appear in an arbitrary order throughout the folds.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> groups = np.array([0, 0, 2, 2, 3, 3])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits()
2
>>> print(group_kfold)
GroupKFold(n_splits=2, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3], group=[2 2]
Test: index=[0 1 4 5], group=[0 0 3 3]
Fold 1:
Train: index=[0 1 4 5], group=[0 0 3 3]
Test: index=[2 3], group=[2 2]
See Also
--------
LeaveOneGroupOut : For splitting the data according to explicit
domain-specific stratification of the dataset.
StratifiedKFold : Takes class information into account to avoid building
folds with imbalanced class proportions (for binary or multiclass
classification tasks).
"""
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits, shuffle=shuffle, random_state=random_state)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
unique_groups, group_idx = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError(
"Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d." % (self.n_splits, n_groups)
)
if self.shuffle:
# Split and shuffle unique groups across n_splits
rng = check_random_state(self.random_state)
unique_groups = rng.permutation(unique_groups)
split_groups = np.array_split(unique_groups, self.n_splits)
for test_group_ids in split_groups:
test_mask = np.isin(groups, test_group_ids)
yield np.where(test_mask)[0]
else:
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(group_idx)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[group_idx]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
| GroupKFold |
python | kamyu104__LeetCode-Solutions | Python/maximum-good-subtree-score.py | {
"start": 2115,
"end": 3263
} | class ____(object):
def goodSubtreeSum(self, vals, par):
"""
:type vals: List[int]
:type par: List[int]
:rtype: int
"""
MOD = 10**9+7
def get_mask(x):
mask = 0
while x:
x, d = divmod(x, 10)
if mask&(1<<d):
return -1
mask |= 1<<d
return mask
def dfs(u):
dp = collections.defaultdict(int)
dp[0] = 0
mask = get_mask(vals[u])
if mask != -1:
dp[mask] = vals[u]
for v in adj[u]:
new_dp = dfs(v)
for m1, v1 in dp.items():
for m2, v2 in new_dp.iteritems():
if m1&m2:
continue
dp[m1|m2] = max(dp[m1|m2], v1+v2)
result[0] = (result[0]+max(dp.itervalues()))%MOD
return dp
adj = [[] for _ in xrange(len(vals))]
for u in xrange(1, len(par)):
adj[par[u]].append(u)
result = [0]
dfs(0)
return result[0]
| Solution2 |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 123827,
"end": 123965
} | class ____:
xlDays = 0 # from enum XlTimeUnit
xlMonths = 1 # from enum XlTimeUnit
xlYears = 2 # from enum XlTimeUnit
| TimeUnit |
python | getsentry__sentry | tests/sentry/workflow_engine/migration_helpers/test_migrate_alert_rule.py | {
"start": 57022,
"end": 58801
} | class ____(BaseMetricAlertMigrationTest):
"""
Tests for get_resolve_threshold(), which calculates the resolution threshold for an alert rule
if none is explicitly specified.
"""
def setUp(self) -> None:
self.metric_alert = self.create_alert_rule()
self.alert_rule_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="critical"
)
self.create_migrated_metric_alert_objects(self.metric_alert)
self.create_migrated_metric_alert_rule_trigger_objects(
self.alert_rule_trigger, DetectorPriorityLevel.HIGH, Condition.GREATER
)
def test_calculate_resolve_threshold_critical_only(self) -> None:
detector = AlertRuleDetector.objects.get(alert_rule_id=self.metric_alert.id).detector
detector_dcg = detector.workflow_condition_group
assert detector_dcg # to appease mypy
resolve_threshold = get_resolve_threshold(detector_dcg)
assert resolve_threshold == self.alert_rule_trigger.alert_threshold
def test_calculate_resolve_threshold_with_warning(self) -> None:
warning_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="warning", alert_threshold=50
)
self.create_migrated_metric_alert_rule_trigger_objects(
warning_trigger, DetectorPriorityLevel.MEDIUM, Condition.GREATER
)
detector = AlertRuleDetector.objects.get(alert_rule_id=self.metric_alert.id).detector
detector_dcg = detector.workflow_condition_group
assert detector_dcg # to appease mypy
resolve_threshold = get_resolve_threshold(detector_dcg)
assert resolve_threshold == warning_trigger.alert_threshold
| CalculateResolveThresholdHelperTest |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 18442,
"end": 21422
} | class ____(parser_test_base.ParserTestBase):
def test_callable_parameters(self):
self.check("""
from typing import Callable
x: Callable[[int, str], bool]""")
self.check(
"""
from typing import Callable
x = ... # type: Callable[..., bool]""",
"""
from typing import Callable
x: Callable[..., bool]""",
)
self.check(
"""
from typing import Any, Callable
x: Callable[Any, bool]""",
"""
from typing import Callable
x: Callable[..., bool]""",
)
self.check("""
from typing import Any, Callable
x: Callable[[Any], bool]""")
self.check("""
from typing import Callable
x: Callable[[], bool]""")
self.check_error(
"""
from typing import Callable
x = ... # type: Callable[[int]]
""",
2,
"Expected 2 parameters to Callable, got 1",
)
self.check_error(
"""
from typing import Callable
x = ... # type: Callable[[], ...]
""",
2,
"Unexpected ellipsis parameter",
)
self.check_error(
"import typing\n\nx = ... # type: typing.Callable[int, int]",
3,
"First argument to Callable must be a list of argument types",
)
self.check_error(
"import typing\n\nx = ... # type: typing.Callable[[], bool, bool]",
3,
"Expected 2 parameters to Callable, got 3",
)
self.check_error(
"import typing\n\nx = ... # type: typing.Callable[[...], bool]",
3,
"did you mean Callable[..., bool]?",
)
def test_ellipsis(self):
self.check_error(
"""
from typing import List
x: List[int, ...]
""",
2,
"Unexpected ellipsis parameter",
)
self.check_error("x: list[int, ...]", 1, "Unexpected ellipsis parameter")
# Tuple[T] and Tuple[T, ...] are distinct.
self.check(
"from typing import Tuple\n\nx = ... # type: Tuple[int]",
"x: tuple[int]",
)
self.check(
"from typing import Tuple\n\nx = ... # type: Tuple[int, ...]",
"x: tuple[int, ...]",
)
def test_tuple(self):
self.check(
"""
from typing import Tuple
x = ... # type: Tuple[int, str]""",
"""
x: tuple[int, str]""",
)
self.check_error(
"""
from typing import Tuple
x = ... # type: Tuple[int, str, ...]
""",
2,
"Unexpected ellipsis parameter",
)
def test_empty_tuple(self):
self.check(
"""
from typing import Tuple
def f() -> Tuple[()]: ...
""",
"""
def f() -> tuple[()]: ...
""",
)
def test_simple(self):
self.check("x: Foo[int, str]")
def test_type_tuple(self):
self.check("x = (str, bytes)", "x: tuple")
self.check("x = (str, bytes,)", "x: tuple")
self.check("x = (str,)", "x: tuple")
self.check("x = str,", "x: tuple")
| HomogeneousTypeTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_data.py | {
"start": 1538,
"end": 1671
} | class ____:
"""Describes the output of a query execution."""
statement_id: str
session_id: str | None
| QueryExecutionOutput |
python | falconry__falcon | tests/test_httperror.py | {
"start": 5065,
"end": 5218
} | class ____:
def on_get(self, req, resp):
raise falcon.HTTPMethodNotAllowed(['PUT'], description='Not Allowed')
| MethodNotAllowedResourceWithBody |
python | Textualize__textual | docs/examples/widgets/rich_log.py | {
"start": 964,
"end": 1744
} | class ____(App):
def compose(self) -> ComposeResult:
yield RichLog(highlight=True, markup=True)
def on_ready(self) -> None:
"""Called when the DOM is ready."""
text_log = self.query_one(RichLog)
text_log.write(Syntax(CODE, "python", indent_guides=True))
rows = iter(csv.reader(io.StringIO(CSV)))
table = Table(*next(rows))
for row in rows:
table.add_row(*row)
text_log.write(table)
text_log.write("[bold magenta]Write text or any Rich renderable!")
def on_key(self, event: events.Key) -> None:
"""Write Key events to log."""
text_log = self.query_one(RichLog)
text_log.write(event)
if __name__ == "__main__":
app = RichLogApp()
app.run()
| RichLogApp |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 656656,
"end": 657042
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Environment", graphql_name="node")
"""The item at the end of the edge."""
| EnvironmentEdge |
python | google__jax | jax/_src/pallas/core.py | {
"start": 21234,
"end": 27556
} | class ____:
"""An internal canonicalized version of BlockSpec.
See the `check_invariants` method for precise specification.
"""
# TODO(apaszke,sharadmv): Replace mapped dims in block_shape with a transform.
# After all, it's just indexing out singleton dimensions.
block_shape: tuple[BlockDim, ...]
transformed_block_aval: state.AbstractRef
index_map_jaxpr: jax_core.ClosedJaxpr
index_map_out_tree: tree_util.PyTreeDef
array_aval: jax_core.ShapedArray # The whole array
origin: OriginStr
transforms: Sequence[MemoryRefTransform] = ()
pipeline_mode: Buffered | None = None
debug: bool = False
def check_invariants(self) -> None:
if not config.enable_checks.value: return
ref_block_shape = _get_ref_block_shape(self.block_shape)
assert ref_block_shape == self.ref_aval.shape, (
self.block_shape, self.ref_aval.shape)
assert len(self.block_shape) == len(self.array_aval.shape), (
self.block_shape, self.array_aval
)
assert not self.index_map_jaxpr.consts
assert all(ov.shape == () and
(ov.dtype == jnp.int32 or ov.dtype == jnp.int64)
for ov in self.index_map_jaxpr.out_avals), (
self.index_map_jaxpr.out_avals)
def replace(self, **kwargs):
new_self = dataclasses.replace(self, **kwargs)
new_self.check_invariants()
return new_self
@property
def block_aval(self) -> state.AbstractRef:
# If you hit this, make sure you take transforms into account and use either
# ref_aval or transformed_block_aval.
assert not self.transforms, "Lowering failed to handle transforms"
return self.transformed_block_aval
@property
def ref_aval(self) -> state.AbstractRef | TransformedRef:
"""Returns the abstract value of the Ref after transformations."""
if not self.transforms:
return self.transformed_block_aval
ref = TransformedRef(self.transformed_block_aval, ())
for transform in reversed(self.transforms):
ref = transform.undo(ref)
return ref
def compute_start_indices_interpret(self, loop_idx, *args):
discharged_jaxpr, discharged_consts = state_discharge.discharge_state(
self.index_map_jaxpr.jaxpr, self.index_map_jaxpr.consts
)
jaxpr = jax_core.ClosedJaxpr(discharged_jaxpr, discharged_consts)
block_indices_and_rest = jax_core.jaxpr_as_fun(jaxpr)(*loop_idx, *args)
# Since we're passing in `Ref`s potentially, we need to split out their
# updated values since we only care about the return values.
block_indices, _ = split_list(block_indices_and_rest,
[len(self.block_shape)])
def _get_start_index(i, b):
match b:
case Squeezed() | Element():
return i
case Blocked(block_size):
return block_size * i
case _:
raise ValueError(f"Unsupported block dim type: {type(b)}")
return tuple(
_get_start_index(i, b) for i, b in zip(block_indices, self.block_shape)
)
def has_trivial_window(self):
"""If block shape is same as the array shape and index_map returns 0s."""
for b, s in zip(self.block_shape, self.array_aval.shape):
if _get_block_dim_size(b) != s:
return False
for atom in self.index_map_jaxpr.jaxpr.outvars:
if not (isinstance(atom, jax_core.Literal) and atom.val == 0):
return False
return True
def to_block_spec(self) -> BlockSpec:
def index_map(*args):
flat_args = tree_util.tree_leaves(args)
return jax_core.jaxpr_as_fun(self.index_map_jaxpr)(*flat_args)
return BlockSpec(
self.block_shape,
index_map,
memory_space=self.block_aval.memory_space,
pipeline_mode=self.pipeline_mode,
)
def to_lojax(
self, index_map_avals, index_map_tree, grid, vmapped_dims
) -> list[BlockMapping]:
block_aval = self.transformed_block_aval
if not block_aval.inner_aval.is_high:
return [self]
assert self.array_aval.is_high
lo_array_avals = self.array_aval.lo_ty()
block_spec = self.to_block_spec()
if not hasattr(block_aval.inner_aval, "lower_block_spec"):
raise ValueError(
f"Cannot lower block spec {block_spec} on {block_aval.inner_aval}."
" Need to define lower_block_spec method on the type."
)
lo_block_specs = block_aval.inner_aval.lower_block_spec(block_spec)
return [
_convert_block_spec_to_block_mapping(
bs,
self.origin,
lo_array_aval,
index_map_avals=index_map_avals,
index_map_tree=index_map_tree,
grid=grid,
vmapped_dims=vmapped_dims,
debug=self.debug,
)
for bs, lo_array_aval in zip(lo_block_specs, lo_array_avals)
]
def __repr__(self):
if self.debug:
return (
f"BlockMapping(block_shape={self.block_shape}, "
f"transformed_block_aval={self.transformed_block_aval}, "
f"index_map_jaxpr={self.index_map_jaxpr}, "
f"index_map_out_tree={self.index_map_out_tree}, "
f"array_aval={self.array_aval}, "
f"origin={self.origin}, "
f"transforms={self.transforms}, "
f"pipeline_mode={self.pipeline_mode}, "
f"debug={self.debug})"
)
return f"BlockMapping(block_shape={self.block_shape})"
def __str__(self):
return self.__repr__()
@contextlib.contextmanager
def tracing_grid_env(grid: GridMappingGrid, mapped_dims: tuple[int, ...]):
if dynamic_shapes_export_enabled():
assert all(i is dynamic_grid_dim or jax_core.is_dim(i) for i in grid)
else:
assert all(i is dynamic_grid_dim or isinstance(i, int) for i in grid)
old_grid_context = _pallas_tracing_env.grid_context
try:
_pallas_tracing_env.grid_context = PallasGridContext(grid, mapped_dims)
yield
finally:
_pallas_tracing_env.grid_context = old_grid_context
@contextlib.contextmanager
def pallas_export_experimental(dynamic_shapes: bool):
old_dynamic_shapes = _pallas_tracing_env.dynamic_shapes
try:
_pallas_tracing_env.dynamic_shapes = dynamic_shapes
yield
finally:
_pallas_tracing_env.dynamic_shapes = old_dynamic_shapes
def dynamic_shapes_export_enabled() -> bool:
return _pallas_tracing_env.dynamic_shapes
@dataclasses.dataclass(frozen=True)
| BlockMapping |
python | gevent__gevent | src/greentest/3.9/test_ssl.py | {
"start": 208833,
"end": 221112
} | class ____(unittest.TestCase):
"""Verify behavior of close sockets with received data before to the handshake.
"""
class SingleConnectionTestServerThread(threading.Thread):
def __init__(self, *, name, call_after_accept, timeout=None):
self.call_after_accept = call_after_accept
self.received_data = b'' # set by .run()
self.wrap_error = None # set by .run()
self.listener = None # set by .start()
self.port = None # set by .start()
if timeout is None:
self.timeout = support.SHORT_TIMEOUT
else:
self.timeout = timeout
super().__init__(name=name)
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
try:
if self.listener:
self.listener.close()
except OSError:
pass
self.join()
self.wrap_error = None # avoid dangling references
def start(self):
self.ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.ssl_ctx.verify_mode = ssl.CERT_REQUIRED
self.ssl_ctx.load_verify_locations(cafile=ONLYCERT)
self.ssl_ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
self.listener = socket.socket()
self.port = socket_helper.bind_port(self.listener)
self.listener.settimeout(self.timeout)
self.listener.listen(1)
super().start()
def run(self):
try:
conn, address = self.listener.accept()
except TimeoutError:
# on timeout, just close the listener
return
finally:
self.listener.close()
with conn:
if self.call_after_accept(conn):
return
try:
tls_socket = self.ssl_ctx.wrap_socket(conn, server_side=True)
except OSError as err: # ssl.SSLError inherits from OSError
self.wrap_error = err
else:
try:
self.received_data = tls_socket.recv(400)
except OSError:
pass # closed, protocol error, etc.
def non_linux_skip_if_other_okay_error(self, err):
if sys.platform == "linux":
return # Expect the full test setup to always work on Linux.
if (isinstance(err, ConnectionResetError) or
(isinstance(err, OSError) and err.errno == errno.EINVAL) or
re.search('wrong.version.number', getattr(err, "reason", ""), re.I)):
# On Windows the TCP RST leads to a ConnectionResetError
# (ECONNRESET) which Linux doesn't appear to surface to userspace.
# If wrap_socket() winds up on the "if connected:" path and doing
# the actual wrapping... we get an SSLError from OpenSSL. Typically
# WRONG_VERSION_NUMBER. While appropriate, neither is the scenario
# we're specifically trying to test. The way this test is written
# is known to work on Linux. We'll skip it anywhere else that it
# does not present as doing so.
try:
self.skipTest(f"Could not recreate conditions on {sys.platform}:"
f" {err=}")
finally:
# gh-108342: Explicitly break the reference cycle
err = None
# If maintaining this conditional winds up being a problem.
# just turn this into an unconditional skip anything but Linux.
# The important thing is that our CI has the logic covered.
def test_preauth_data_to_tls_server(self):
server_accept_called = threading.Event()
ready_for_server_wrap_socket = threading.Event()
def call_after_accept(unused):
server_accept_called.set()
if not ready_for_server_wrap_socket.wait(support.SHORT_TIMEOUT):
raise RuntimeError("wrap_socket event never set, test may fail.")
return False # Tell the server thread to continue.
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="preauth_data_to_tls_server")
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
with socket.socket() as client:
client.connect(server.listener.getsockname())
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(client)
client.setblocking(False)
server_accept_called.wait()
client.send(b"DELETE /data HTTP/1.0\r\n\r\n")
client.close() # RST
ready_for_server_wrap_socket.set()
server.join()
wrap_error = server.wrap_error
server.wrap_error = None
try:
self.assertEqual(b"", server.received_data)
self.assertIsInstance(wrap_error, OSError) # All platforms.
self.non_linux_skip_if_other_okay_error(wrap_error)
self.assertIsInstance(wrap_error, ssl.SSLError)
self.assertIn("before TLS handshake with data", wrap_error.args[1])
self.assertIn("before TLS handshake with data", wrap_error.reason)
self.assertNotEqual(0, wrap_error.args[0])
self.assertIsNone(wrap_error.library, msg="attr must exist")
finally:
# gh-108342: Explicitly break the reference cycle
wrap_error = None
server = None
def test_preauth_data_to_tls_client(self):
server_can_continue_with_wrap_socket = threading.Event()
client_can_continue_with_wrap_socket = threading.Event()
def call_after_accept(conn_to_client):
if not server_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
print("ERROR: test client took too long")
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
b"HTTP/1.0 307 Temporary Redirect\r\n"
b"Location: https://example.com/someone-elses-server\r\n"
b"\r\n")
conn_to_client.close() # RST
client_can_continue_with_wrap_socket.set()
return True # Tell the server to stop.
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="preauth_data_to_tls_client")
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
with socket.socket() as client:
client.connect(server.listener.getsockname())
server_can_continue_with_wrap_socket.set()
if not client_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
self.fail("test server took too long")
ssl_ctx = ssl.create_default_context()
try:
tls_client = ssl_ctx.wrap_socket(
client, server_hostname="localhost")
except OSError as err: # SSLError inherits from OSError
wrap_error = err
received_data = b""
else:
wrap_error = None
received_data = tls_client.recv(400)
tls_client.close()
server.join()
try:
self.assertEqual(b"", received_data)
self.assertIsInstance(wrap_error, OSError) # All platforms.
self.non_linux_skip_if_other_okay_error(wrap_error)
self.assertIsInstance(wrap_error, ssl.SSLError)
self.assertIn("before TLS handshake with data", wrap_error.args[1])
self.assertIn("before TLS handshake with data", wrap_error.reason)
self.assertNotEqual(0, wrap_error.args[0])
self.assertIsNone(wrap_error.library, msg="attr must exist")
finally:
# gh-108342: Explicitly break the reference cycle
wrap_error = None
server = None
def test_https_client_non_tls_response_ignored(self):
server_responding = threading.Event()
class SynchronizedHTTPSConnection(http.client.HTTPSConnection):
def connect(self):
# Call clear text HTTP connect(), not the encrypted HTTPS (TLS)
# connect(): wrap_socket() is called manually below.
http.client.HTTPConnection.connect(self)
# Wait for our fault injection server to have done its thing.
if not server_responding.wait(support.SHORT_TIMEOUT) and support.verbose:
sys.stdout.write("server_responding event never set.")
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
def call_after_accept(conn_to_client):
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
b"HTTP/1.0 402 Payment Required\r\n"
b"\r\n")
conn_to_client.close() # RST
server_responding.set()
return True # Tell the server to stop.
timeout = 2.0
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="non_tls_http_RST_responder",
timeout=timeout)
server.__enter__() # starts it
self.addCleanup(server.__exit__) # ... & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
connection = SynchronizedHTTPSConnection(
server.listener.getsockname()[0],
port=server.port,
context=ssl.create_default_context(),
timeout=timeout,
)
# There are lots of reasons this raises as desired, long before this
# test was added. Sending the request requires a successful TLS wrapped
# socket; that fails if the connection is broken. It may seem pointless
# to test this. It serves as an illustration of something that we never
# want to happen... properly not happening.
with self.assertRaises(OSError):
connection.request("HEAD", "/test", headers={"Host": "localhost"})
response = connection.getresponse()
server.join()
def setUpModule():
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
thread_info = support.threading_setup()
unittest.addModuleCleanup(support.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| TestPreHandshakeClose |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 40927,
"end": 40983
} | class ____(SegmentInfo):
mem_size: int
| LoadedSegmentInfo |
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v2.py | {
"start": 9665,
"end": 11658
} | class ____(Initializer):
"""Initializer that generates tensors with a normal distribution.
Also available via the shortcut function
`tf.keras.initializers.random_normal`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to random normal values.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _assert_float_dtype(_get_dtype(dtype))
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_normal(shape, self.mean, self.stddev,
dtype)
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
| RandomNormal |
python | huggingface__transformers | src/transformers/models/rag/modeling_rag.py | {
"start": 37484,
"end": 59939
} | class ____(RagPreTrainedModel):
def __init__(
self,
config: Optional[PreTrainedConfig] = None,
question_encoder: Optional[PreTrainedModel] = None,
generator: Optional[PreTrainedModel] = None,
retriever: Optional[RagRetriever] = None,
**kwargs,
):
r"""
question_encoder (`PreTrainedModel`, *optional*):
The model responsible for encoding the question into hidden states for retrieval.
generator (`PreTrainedModel`, *optional*):
The model responsible for generating text based on retrieved documents.
retriever (`RagRetriever`, *optional*):
The component responsible for retrieving documents from a knowledge base given the encoded question.
"""
assert config is not None or (question_encoder is not None and generator is not None), (
"Either a configuration or an encoder and a generator has to be provided."
)
if config is None:
config = RagConfig.from_question_encoder_generator_configs(
question_encoder.config, generator.config, **kwargs
)
super().__init__(config)
# instantiate model
self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
self.rag.context_encoder_training = True
self.rag.ctx_encoder = ctx_encoder
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
past_key_values: Optional[Cache] = None,
context_input_ids: Optional[torch.LongTensor] = None,
context_attention_mask: Optional[torch.LongTensor] = None,
doc_scores: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_retrieved: Optional[bool] = None,
exclude_bos_score: Optional[bool] = None,
reduce_loss: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
n_docs: Optional[int] = None,
**kwargs, # needs kwargs for generation
) -> RetrievAugLMMarginOutput:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
obtain the indices.
[What are input IDs?](../glossary#input-ids)
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*)
Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
*optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
generator's encoder.
Used by the ([`RagModel`]) model during decoding.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to
the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be
provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores`
has to be provided to the forward pass. `doc_scores` can be computed via
`question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
output_retrieved (`bool`, *optional*):
Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask`. See returned tensors for more detail.
exclude_bos_score (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
the loss.
reduce_loss (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
operation.
n_docs (`int`, *optional*):
The number of documents to retrieve.
Example:
```python
>>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> labels = targets["input_ids"]
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> # or use retriever separately
>>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
>>> # 1. Encode
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(
... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
... ).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... decoder_input_ids=labels,
... )
```"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_retrieved=output_retrieved,
n_docs=n_docs,
)
loss = None
if labels is not None:
loss = self.get_nll(
outputs.logits,
outputs.doc_scores,
decoder_input_ids,
reduce_loss=reduce_loss,
epsilon=self.config.label_smoothing,
exclude_bos_score=exclude_bos_score,
n_docs=n_docs,
)
return RetrievAugLMMarginOutput(
loss=loss,
logits=outputs.logits,
doc_scores=outputs.doc_scores,
past_key_values=outputs.past_key_values,
context_input_ids=outputs.context_input_ids,
context_attention_mask=outputs.context_attention_mask,
retrieved_doc_embeds=outputs.retrieved_doc_embeds,
retrieved_doc_ids=outputs.retrieved_doc_ids,
question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
question_enc_hidden_states=outputs.question_enc_hidden_states,
question_enc_attentions=outputs.question_enc_attentions,
generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
generator_enc_hidden_states=outputs.generator_enc_hidden_states,
generator_enc_attentions=outputs.generator_enc_attentions,
generator_dec_hidden_states=outputs.generator_dec_hidden_states,
generator_dec_attentions=outputs.generator_dec_attentions,
generator_cross_attentions=outputs.generator_cross_attentions,
)
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@torch.no_grad()
def generate(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.LongTensor] = None,
    context_input_ids: Optional[torch.LongTensor] = None,
    context_attention_mask: Optional[torch.LongTensor] = None,
    doc_scores: Optional[torch.FloatTensor] = None,
    do_deduplication: Optional[bool] = None,  # defaults to True
    num_return_sequences: Optional[int] = None,  # defaults to 1
    num_beams: Optional[int] = None,  # defaults to 1
    n_docs: Optional[int] = None,
    **model_kwargs,
) -> torch.LongTensor:
    """
    Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation
    for more information on how to set other generate input parameters.

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            The sequence used as a prompt for the generation. If `input_ids` is not passed, then
            `context_input_ids` has to be provided.
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
            retriever.
        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.

            If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
            `context_attention_mask` have to be provided to the forward pass. They are returned by
            [`~RagRetriever.__call__`].
        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`.

            If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
            provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
        do_deduplication (`bool`, *optional*):
            Whether or not to deduplicate the generations from different context documents for a given input. Has
            to be set to `False` if used while training with distributed backend.
        num_return_sequences(`int`, *optional*, defaults to 1):
            The number of independently computed returned sequences for each element in the batch. Note that this
            is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function,
            where we set `num_return_sequences` to `num_beams`.
        num_beams (`int`, *optional*, defaults to 1):
            Number of beams for beam search. 1 means no beam search.
        n_docs (`int`, *optional*, defaults to `config.n_docs`)
            Number of documents to retrieve and/or number of documents for which to generate an answer.
        kwargs (`dict[str, Any]`, *optional*):
            Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].

    Return:
        `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
        sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
        finished early due to the `eos_token_id`.
    """
    # Resolve per-call overrides against the model config defaults.
    n_docs = n_docs if n_docs is not None else self.config.n_docs
    do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
    num_doc_return_sequences = (
        num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
    )
    num_beams = num_beams if num_beams is not None else self.config.num_beams
    assert input_ids is not None or context_input_ids is not None, (
        " At least one of input_ids or context_input_ids must be given"
    )
    # If the caller did not provide pre-retrieved contexts, run retrieval now:
    # encode the question, query the retriever on CPU float32 embeddings, and
    # move the resulting context ids back onto the input device.
    if self.retriever is not None and context_input_ids is None:
        question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
        context_input_ids = self.retriever(
            input_ids,
            question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(),
            prefix=self.generator.config.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )["context_input_ids"]
        # set to correct device
        context_input_ids = context_input_ids.to(input_ids)
    hypos = []
    # RAG-sequence "thorough" decoding generates num_beams candidates per document
    # (hence num_return_sequences=num_beams here) and rescores them afterwards.
    model_kwargs["num_beams"] = num_beams
    model_kwargs["num_return_sequences"] = num_beams
    model_kwargs["attention_mask"] = None
    # context_input_ids holds n_docs rows per batch example.
    batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
    for index in range(batch_size):
        # first, generate beams from documents:
        generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs]  # (n_docs, max_len)
        output_sequences = self.generator.generate(
            generator_input_ids,
            **model_kwargs,
        )  # n_docs * n_beam, tgt_len
        if do_deduplication:
            # Keep one copy of each distinct candidate; keyed by the stringified
            # token list so identical sequences from different docs collapse.
            output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))
        num_candidates = output_sequences.shape[
            0
        ]  # after deduplication, this number can be less than n_docs*n_beam
        # then, run model forwards to get nll scores:
        if input_ids is not None:
            # Score each candidate against the original question; forward() with
            # labels returns the per-candidate marginalized NLL.
            new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
            outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
        else:  # input_ids is None, need context_input_ids/mask and doc_scores
            assert context_attention_mask is not None, (
                "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you"
                " can set a retriever using the `set_retriever(...)` function."
            )
            assert doc_scores is not None, (
                "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a"
                " retriever using the `set_retriever(...)` function."
            )
            # Tile this example's contexts/mask/doc-scores once per candidate so
            # a single forward pass scores all candidates.
            individual_input_ids = generator_input_ids.repeat(
                num_candidates, 1
            )  # (num_candidates*n_docs, max_len)
            individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
            individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
            individual_doc_scores = doc_scores[index : (index + 1), :]  # doc_scores.shape = [batch, n_docs]
            individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1)  # [num_candidates, n_docs]
            outputs = self(
                context_input_ids=individual_input_ids,
                context_attention_mask=individual_attention_mask,
                doc_scores=individual_doc_scores,
                labels=output_sequences,
                exclude_bos_score=True,
            )
        # Lowest loss == highest log-likelihood; take the top candidates.
        top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]
        # add hypothesis
        hypos.append(output_sequences[top_cand_inds])
    # Pad all per-example hypotheses to a common length and stack into one tensor.
    return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
def get_nll(
    self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
):
    """
    Compute the RAG-sequence marginalized negative log-likelihood.

    Args:
        seq_logits: generator logits of shape `(batch_size * n_docs, tgt_len, vocab_size)`.
        doc_scores: document retrieval scores of shape `(batch_size, n_docs)`.
        target: gold token ids of shape `(batch_size, tgt_len)`.
        reduce_loss: if True, sum the per-example losses into a scalar.
        epsilon: label-smoothing factor.
        exclude_bos_score: if True (and targets start with BOS), drop the first
            token's score from the sum.
        n_docs: number of documents per example; defaults to `config.n_docs`.

    Returns:
        Label-smoothed NLL, per example (or summed when `reduce_loss=True`).
    """
    # Shift targets left so position t is scored against the next token;
    # the freed last slot is filled with the generator pad id (masked later).
    # shift tokens left
    target = torch.cat(
        [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
    )
    n_docs = n_docs if n_docs is not None else self.config.n_docs
    # bos_token_id is None for T5
    bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
    # Only exclude BOS if every sequence in the batch actually starts with it.
    use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()

    def _mask_pads(ll, smooth_obj):
        # Zero out contributions at padding positions so they don't affect the loss.
        pad_mask = target.eq(self.config.generator.pad_token_id)
        if pad_mask.any():
            ll.masked_fill_(pad_mask, 0.0)
            smooth_obj.masked_fill_(pad_mask, 0.0)
        return ll.squeeze(-1), smooth_obj.squeeze(-1)

    # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
    seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
        seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
    )  # batch_size x n_docs x tgt_len x #vocab_size
    doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)
    # RAG-sequence marginalization: add the (log) document probability once per
    # sequence — folded into the second token's scores so the (possibly excluded)
    # first/BOS token is left untouched.
    first_token_scores = seq_logprobs[:, :, :1, :]
    second_token_scores = seq_logprobs[:, :, 1:2, :]
    remainder = seq_logprobs[:, :, 2:, :]
    rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)
    # calculate loss: broadcast the targets across documents, then gather the
    # log-prob assigned to each gold token.
    target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
    assert target.dim() == rag_logprobs.dim()
    ll = rag_logprobs.gather(dim=-1, index=target)
    smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) logits
    ll, smooth_obj = _mask_pads(ll, smooth_obj)
    # sum over tokens, exclude bos while scoring
    ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
    smooth_obj = smooth_obj.sum(2)
    ll = ll.logsumexp(1)  # logsumexp over docs
    smooth_obj = smooth_obj.logsumexp(1)
    nll_loss = -ll
    smooth_loss = -smooth_obj
    if reduce_loss:
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
    # Standard label smoothing: mix the NLL with the mean over the vocabulary.
    eps_i = epsilon / rag_logprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss
@staticmethod
def _cat_and_pad(tensors, pad_token_id):
    """Stack a list of 2-D id tensors along dim 0, right-padding every row to the widest tensor."""
    total_rows = sum(t.shape[0] for t in tensors)
    max_cols = max(t.shape[1] for t in tensors)
    # `.new(...)` keeps the dtype/device of the first tensor for the padded buffer.
    padded = tensors[0].new(total_rows, max_cols).fill_(pad_token_id)
    row = 0
    for t in tensors:
        n_rows, width = t.shape[0], t.shape[1]
        padded[row : row + n_rows, :width] = t
        row += n_rows
    return padded
@auto_docstring(
custom_intro="""
A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
"""
)
| RagSequenceForGeneration |
python | getsentry__sentry | src/sentry/seer/sentry_data_models.py | {
"start": 1914,
"end": 2161
} | class ____(BaseModel):
profile_id: str
transaction_name: str | None
execution_tree: list[ExecutionTreeNode]
project_id: int
start_ts: float | None = None
end_ts: float | None = None
is_continuous: bool = False
| ProfileData |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 28762,
"end": 29434
} | class ____(ProjectRedirectsMixin, GenericModelView):
"""
Insert a redirect in a specific position.
This is done by changing the position of the redirect,
after saving the redirect, all other positions are updated
automatically.
"""
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
redirect = self.get_object()
position = int(self.kwargs["position"])
redirect.position = position
redirect.save()
return HttpResponseRedirect(
reverse(
"projects_redirects",
args=[self.get_project().slug],
)
)
| ProjectRedirectsInsert |
python | fastapi__sqlmodel | docs_src/tutorial/connect/select/tutorial003.py | {
"start": 254,
"end": 2188
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero, Team).join(Team, isouter=True)
results = session.exec(statement)
for hero, team in results:
print("Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | scipy__scipy | scipy/optimize/_shgo_lib/_vertex.py | {
"start": 4530,
"end": 5006
} | class ____(VertexBase):
"""
Add homology properties of a scalar field f: R^n --> R^m associated with
the geometry built from the VertexBase class.
"""
def __init__(self, x, sfield=None, vfield=None, field_args=(),
vfield_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
super().__init__(x, nn=nn, index=index)
raise NotImplementedError("This class is still a work in progress")
| VertexVectorField |
python | plotly__plotly.py | _plotly_utils/png.py | {
"start": 10825,
"end": 42784
} | class ____:
"""
PNG encoder in pure Python.
"""
def __init__(
self,
width=None,
height=None,
size=None,
greyscale=Default,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
planes=None,
colormap=None,
maxval=None,
chunk_limit=2**20,
x_pixels_per_unit=None,
y_pixels_per_unit=None,
unit_is_meter=False,
):
"""
Create a PNG encoder object.
Arguments:
width, height
Image size in pixels, as two separate arguments.
size
Image size (w,h) in pixels, as single argument.
greyscale
Pixels are greyscale, not RGB.
alpha
Input data has alpha channel (RGBA or LA).
bitdepth
Bit depth: from 1 to 16 (for each channel).
palette
Create a palette for a colour mapped image (colour type 3).
transparent
Specify a transparent colour (create a ``tRNS`` chunk).
background
Specify a default background colour (create a ``bKGD`` chunk).
gamma
Specify a gamma value (create a ``gAMA`` chunk).
compression
zlib compression level: 0 (none) to 9 (more compressed);
default: -1 or None.
interlace
Create an interlaced image.
chunk_limit
Write multiple ``IDAT`` chunks to save memory.
x_pixels_per_unit
Number of pixels a unit along the x axis (write a
`pHYs` chunk).
y_pixels_per_unit
Number of pixels a unit along the y axis (write a
`pHYs` chunk). Along with `x_pixel_unit`, this gives
the pixel size ratio.
unit_is_meter
`True` to indicate that the unit (for the `pHYs`
chunk) is metre.
The image size (in pixels) can be specified either by using the
`width` and `height` arguments, or with the single `size`
argument.
If `size` is used it should be a pair (*width*, *height*).
The `greyscale` argument indicates whether input pixels
are greyscale (when true), or colour (when false).
The default is true unless `palette=` is used.
The `alpha` argument (a boolean) specifies
whether input pixels have an alpha channel (or not).
`bitdepth` specifies the bit depth of the source pixel values.
Each channel may have a different bit depth.
Each source pixel must have values that are
an integer between 0 and ``2**bitdepth-1``, where
`bitdepth` is the bit depth for the corresponding channel.
For example, 8-bit images have values between 0 and 255.
PNG only stores images with bit depths of
1,2,4,8, or 16 (the same for all channels).
When `bitdepth` is not one of these values or where
channels have different bit depths,
the next highest valid bit depth is selected,
and an ``sBIT`` (significant bits) chunk is generated
that specifies the original precision of the source image.
In this case the supplied pixel values will be rescaled to
fit the range of the selected bit depth.
The PNG file format supports many bit depth / colour model
combinations, but not all.
The details are somewhat arcane
(refer to the PNG specification for full details).
Briefly:
Bit depths < 8 (1,2,4) are only allowed with greyscale and
colour mapped images;
colour mapped images cannot have bit depth 16.
For colour mapped images
(in other words, when the `palette` argument is specified)
the `bitdepth` argument must match one of
the valid PNG bit depths: 1, 2, 4, or 8.
(It is valid to have a PNG image with a palette and
an ``sBIT`` chunk, but the meaning is slightly different;
it would be awkward to use the `bitdepth` argument for this.)
The `palette` option, when specified,
causes a colour mapped image to be created:
the PNG colour type is set to 3;
`greyscale` must not be true; `alpha` must not be true;
`transparent` must not be set.
The bit depth must be 1,2,4, or 8.
When a colour mapped image is created,
the pixel values are palette indexes and
the `bitdepth` argument specifies the size of these indexes
(not the size of the colour values in the palette).
The palette argument value should be a sequence of 3- or
4-tuples.
3-tuples specify RGB palette entries;
4-tuples specify RGBA palette entries.
All the 4-tuples (if present) must come before all the 3-tuples.
A ``PLTE`` chunk is created;
if there are 4-tuples then a ``tRNS`` chunk is created as well.
The ``PLTE`` chunk will contain all the RGB triples in the same
sequence;
the ``tRNS`` chunk will contain the alpha channel for
all the 4-tuples, in the same sequence.
Palette entries are always 8-bit.
If specified, the `transparent` and `background` parameters must be
a tuple with one element for each channel in the image.
Either a 3-tuple of integer (RGB) values for a colour image, or
a 1-tuple of a single integer for a greyscale image.
If specified, the `gamma` parameter must be a positive number
(generally, a `float`).
A ``gAMA`` chunk will be created.
Note that this will not change the values of the pixels as
they appear in the PNG file,
they are assumed to have already
been converted appropriately for the gamma specified.
The `compression` argument specifies the compression level to
be used by the ``zlib`` module.
Values from 1 to 9 (highest) specify compression.
0 means no compression.
-1 and ``None`` both mean that the ``zlib`` module uses
the default level of compession (which is generally acceptable).
If `interlace` is true then an interlaced image is created
(using PNG's so far only interace method, *Adam7*).
This does not affect how the pixels should be passed in,
rather it changes how they are arranged into the PNG file.
On slow connexions interlaced images can be
partially decoded by the browser to give
a rough view of the image that is
successively refined as more image data appears.
.. note ::
Enabling the `interlace` option requires the entire image
to be processed in working memory.
`chunk_limit` is used to limit the amount of memory used whilst
compressing the image.
In order to avoid using large amounts of memory,
multiple ``IDAT`` chunks may be created.
"""
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if not is_natural(width) or not is_natural(height):
raise ProtocolError("width and height must be integers")
if width <= 0 or height <= 0:
raise ProtocolError("width and height must be greater than zero")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2**31 - 1 or height > 2**31 - 1:
raise ProtocolError("width and height cannot exceed 2**31-1")
if alpha and transparent is not None:
raise ProtocolError("transparent colour not allowed with alpha channel")
# bitdepth is either single integer, or tuple of integers.
# Convert to tuple.
try:
len(bitdepth)
except TypeError:
bitdepth = (bitdepth,)
for b in bitdepth:
valid = is_natural(b) and 1 <= b <= 16
if not valid:
raise ProtocolError(
"each bitdepth %r must be a positive integer <= 16" % (bitdepth,)
)
# Calculate channels, and
# expand bitdepth to be one element per channel.
palette = check_palette(palette)
alpha = bool(alpha)
colormap = bool(palette)
if greyscale is Default and palette:
greyscale = False
greyscale = bool(greyscale)
if colormap:
color_planes = 1
planes = 1
else:
color_planes = (3, 1)[greyscale]
planes = color_planes + alpha
if len(bitdepth) == 1:
bitdepth *= planes
bitdepth, self.rescale = check_bitdepth_rescale(
palette, bitdepth, transparent, alpha, greyscale
)
# These are assertions, because above logic should have
# corrected or raised all problematic cases.
if bitdepth < 8:
assert greyscale or palette
assert not alpha
if bitdepth > 8:
assert not palette
transparent = check_color(transparent, greyscale, "transparent")
background = check_color(background, greyscale, "background")
# It's important that the true boolean values
# (greyscale, alpha, colormap, interlace) are converted
# to bool because Iverson's convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = greyscale
self.alpha = alpha
self.colormap = colormap
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = palette
self.x_pixels_per_unit = x_pixels_per_unit
self.y_pixels_per_unit = y_pixels_per_unit
self.unit_is_meter = bool(unit_is_meter)
self.color_type = 4 * self.alpha + 2 * (not greyscale) + 1 * self.colormap
assert self.color_type in (0, 2, 3, 4, 6)
self.color_planes = color_planes
self.planes = planes
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth / 8) * self.planes
def write(self, outfile, rows):
"""
Write a PNG image to the output file.
`rows` should be an iterable that yields each row
(each row is a sequence of values).
The rows should be the rows of the original image,
so there should be ``self.height`` rows of
``self.width * self.planes`` values.
If `interlace` is specified (when creating the instance),
then an interlaced PNG file will be written.
Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing requires the entire image to be in working memory.
"""
# Values per row
vpr = self.width * self.planes
def check_rows(rows):
"""
Yield each row in rows,
but check each row first (for correct width).
"""
for i, row in enumerate(rows):
try:
wrong_length = len(row) != vpr
except TypeError:
# When using an itertools.ichain object or
# other generator not supporting __len__,
# we set this to False to skip the check.
wrong_length = False
if wrong_length:
# Note: row numbers start at 0.
raise ProtocolError(
"Expected %d values but got %d values, in row %d"
% (vpr, len(row), i)
)
yield row
if self.interlace:
fmt = "BH"[self.bitdepth > 8]
a = array(fmt, itertools.chain(*check_rows(rows)))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, check_rows(rows))
if nrows != self.height:
raise ProtocolError(
"rows supplied (%d) does not match height (%d)" % (nrows, self.height)
)
def write_passes(self, outfile, rows):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file.
For straightlaced images, this is the usual top to bottom ordering.
For interlaced images the rows should have been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row
(each row being a sequence of values).
"""
# Ensure rows are scaled (to 4-/8-/16-bit),
# and packed into bytes.
if self.rescale:
rows = rescale_rows(rows, self.rescale)
if self.bitdepth < 8:
rows = pack_rows(rows, self.bitdepth)
elif self.bitdepth == 16:
rows = unpack_rows(rows)
return self.write_packed(outfile, rows)
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`.
`rows` should be an iterator that yields each packed row;
a packed row being a sequence of packed bytes.
The rows have a filter byte prefixed and
are then compressed into one or more IDAT chunks.
They are not processed any further,
so if bitdepth is other than 1, 2, 4, 8, 16,
the pixel values should have been scaled
before passing them to this method.
This method does work for interlaced images but it is best avoided.
For interlaced images, the rows should be
presented in the order that they appear in the file.
"""
self.write_preamble(outfile)
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# data accumulates bytes to be compressed for the IDAT chunk;
# it's compressed when sufficiently large.
data = bytearray()
for i, row in enumerate(rows):
# Add "None" filter type.
# Currently, it's essential that this filter type be used
# for every scanline as
# we do not mark the first row of a reduced pass image;
# that means we could accidentally compute
# the wrong filtered scanline if we used
# "up", "average", or "paeth" on such a line.
data.append(0)
data.extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(data)
if len(compressed):
write_chunk(outfile, b"IDAT", compressed)
data = bytearray()
compressed = compressor.compress(bytes(data))
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, b"IDAT", compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, b"IEND")
return i + 1
def write_preamble(self, outfile):
# http://www.w3.org/TR/PNG/#5PNG-file-signature
outfile.write(signature)
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(
outfile,
b"IHDR",
struct.pack(
"!2I5B",
self.width,
self.height,
self.bitdepth,
self.color_type,
0,
0,
self.interlace,
),
)
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(
outfile, b"gAMA", struct.pack("!L", int(round(self.gamma * 1e5)))
)
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(
outfile,
b"sBIT",
struct.pack("%dB" % self.planes, *[s[0] for s in self.rescale]),
)
# :chunk:order: Without a palette (PLTE chunk),
# ordering is relatively relaxed.
# With one, gAMA chunk must precede PLTE chunk
# which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p, t = make_palette_chunks(self.palette)
write_chunk(outfile, b"PLTE", p)
if t:
# tRNS chunk is optional;
# Only needed if palette entries have alpha.
write_chunk(outfile, b"tRNS", t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
fmt = "!1H"
else:
fmt = "!3H"
write_chunk(outfile, b"tRNS", struct.pack(fmt, *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
fmt = "!1H"
else:
fmt = "!3H"
write_chunk(outfile, b"bKGD", struct.pack(fmt, *self.background))
# http://www.w3.org/TR/PNG/#11pHYs
if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
tup = (
self.x_pixels_per_unit,
self.y_pixels_per_unit,
int(self.unit_is_meter),
)
write_chunk(outfile, b"pHYs", struct.pack("!LLB", *tup))
def write_array(self, outfile, pixels):
"""
Write an array that holds all the image values
as a PNG file on the output file.
See also :meth:`write` method.
"""
if self.interlace:
if not isarray(pixels):
# Coerce to array type
fmt = "BH"[self.bitdepth > 8]
pixels = array(fmt, pixels)
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def array_scanlines(self, pixels):
"""
Generates rows (each a sequence of values) from
a single array of values.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
def array_scanlines_interlace(self, pixels):
"""
Generator for interlaced scanlines from an array.
`pixels` is the full source image as a single array of values.
The generator yields each scanline of the reduced passes in turn,
each scanline being a sequence of values.
"""
# http://www.w3.org/TR/PNG/#8InterlaceMethods
# Array type.
fmt = "BH"[self.bitdepth > 8]
# Value per row
vpr = self.width * self.planes
# Each iteration generates a scanline starting at (x, y)
# and consisting of every xstep pixels.
for lines in adam7_generate(self.width, self.height):
for x, y, xstep in lines:
# Pixels per row (of reduced image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Values per row (of reduced image)
reduced_row_len = ppr * self.planes
if xstep == 1:
# Easy case: line is a simple slice.
offset = y * vpr
yield pixels[offset : offset + vpr]
continue
# We have to step by xstep,
# which we can do one plane at a time
# using the step in Python slices.
row = array(fmt)
# There's no easier way to set the length of an array
row.extend(pixels[0:reduced_row_len])
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
row[i :: self.planes] = pixels[offset + i : end_offset : skip]
yield row
def write_chunk(outfile, tag, data=b""):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
data = bytes(data)
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2**32 - 1
outfile.write(struct.pack("!I", checksum))
def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(signature)
for chunk in chunks:
write_chunk(out, *chunk)
def rescale_rows(rows, rescale):
"""
Take each row in rows (an iterator) and yield
a fresh row with the pixels scaled according to
the rescale parameters in the list `rescale`.
Each element of `rescale` is a tuple of
(source_bitdepth, target_bitdepth),
with one element per channel.
"""
# One factor for each channel
fs = [float(2 ** s[1] - 1) / float(2 ** s[0] - 1) for s in rescale]
# Assume all target_bitdepths are the same
target_bitdepths = set(s[1] for s in rescale)
assert len(target_bitdepths) == 1
(target_bitdepth,) = target_bitdepths
typecode = "BH"[target_bitdepth > 8]
# Number of channels
n_chans = len(rescale)
for row in rows:
rescaled_row = array(typecode, iter(row))
for i in range(n_chans):
channel = array(typecode, (int(round(fs[i] * x)) for x in row[i::n_chans]))
rescaled_row[i::n_chans] = channel
yield rescaled_row
def pack_rows(rows, bitdepth):
"""Yield packed rows that are a byte array.
Each byte is packed with the values from several pixels.
"""
assert bitdepth < 8
assert 8 % bitdepth == 0
# samples per byte
spb = int(8 / bitdepth)
def make_byte(block):
"""Take a block of (2, 4, or 8) values,
and pack them into a single byte.
"""
res = 0
for v in block:
res = (res << bitdepth) + v
return res
for row in rows:
a = bytearray(row)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
n = float(len(a))
extra = math.ceil(n / spb) * spb - n
a.extend([0] * int(extra))
# Pack into bytes.
# Each block is the samples for one byte.
blocks = group(a, spb)
yield bytearray(make_byte(block) for block in blocks)
def unpack_rows(rows):
"""Unpack each row from being 16-bits per value,
to being a sequence of bytes.
"""
for row in rows:
fmt = "!%dH" % len(row)
yield bytearray(struct.pack(fmt, *row))
def make_palette_chunks(palette):
"""
Create the byte sequences for a ``PLTE`` and
if necessary a ``tRNS`` chunk.
Returned as a pair (*p*, *t*).
*t* will be ``None`` if no ``tRNS`` chunk is necessary.
"""
p = bytearray()
t = bytearray()
for x in palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
if t:
return p, t
return p, None
def check_bitdepth_rescale(palette, bitdepth, transparent, alpha, greyscale):
"""
Returns (bitdepth, rescale) pair.
"""
if palette:
if len(bitdepth) != 1:
raise ProtocolError("with palette, only a single bitdepth may be used")
(bitdepth,) = bitdepth
if bitdepth not in (1, 2, 4, 8):
raise ProtocolError("with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ProtocolError("transparent and palette not compatible")
if alpha:
raise ProtocolError("alpha and palette not compatible")
if greyscale:
raise ProtocolError("greyscale and palette not compatible")
return bitdepth, None
# No palette, check for sBIT chunk generation.
if greyscale and not alpha:
# Single channel, L.
(bitdepth,) = bitdepth
if bitdepth in (1, 2, 4, 8, 16):
return bitdepth, None
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5, 6, 7)
targetbitdepth = 8
return targetbitdepth, [(bitdepth, targetbitdepth)]
assert alpha or not greyscale
depth_set = tuple(set(bitdepth))
if depth_set in [(8,), (16,)]:
# No sBIT required.
(bitdepth,) = depth_set
return bitdepth, None
targetbitdepth = (8, 16)[max(bitdepth) > 8]
return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
# Regex for decoding mode string
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info={}):
"""
Create a PNG :class:`Image` object from a 2-dimensional array.
One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
Unless they are specified using the *info* parameter,
the PNG's height and width are taken from the array size.
The first axis is the height; the second axis is the
ravelled width and channel index.
The array is treated is a sequence of rows,
each row being a sequence of values (``width*channels`` in number).
So an RGB image that is 16 pixels high and 8 wide will
occupy a 2-dimensional array that is 16x24
(each row will be 8*3 = 24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth
(overriding how this function normally derives the bit depth,
see below).
Appending ``';16'`` to the mode will cause the PNG to be
16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array,
but it can be any suitable Python sequence.
For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.
The exact rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension.
It's slightly more complicated than that because
an iterator of rows can be used, and it all still works.
Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from
the array element's datatype
(but if *mode* specifies a bitdepth then that is used instead).
The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects.
A 1 byte datatype will give a bit depth of 8,
a 2 byte datatype will give a bit depth of 16.
If the datatype does not have an implicit size,
like the above example where it is a plain Python list of lists,
then a default of 8 is used.
The *info* parameter is a dictionary that can
be used to specify metadata (in the same style as
the arguments to the :class:`png.Writer` class).
For this function the keys that are useful are:
height
overrides the height derived from the array dimensions and
allows *a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype
(but must match *mode* if that also specifies a bit depth).
Generally anything specified in the *info* dictionary will
override any implicit choices that this function would otherwise make,
but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and
false when mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
match = RegexModeDecode.match(mode)
if not match:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode, bitdepth = match.groups()
if bitdepth:
bitdepth = int(bitdepth)
# Colour format.
if "greyscale" in info:
if bool(info["greyscale"]) != ("L" in mode):
raise ProtocolError("info['greyscale'] should match mode.")
info["greyscale"] = "L" in mode
alpha = "A" in mode
if "alpha" in info:
if bool(info["alpha"]) != alpha:
raise ProtocolError("info['alpha'] should match mode.")
info["alpha"] = alpha
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get("bitdepth") and bitdepth != info["bitdepth"]:
raise ProtocolError(
"bitdepth (%d) should match bitdepth of info (%d)."
% (bitdepth, info["bitdepth"])
)
info["bitdepth"] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
width, height = check_sizes(info.get("size"), info.get("width"), info.get("height"))
if width:
info["width"] = width
if height:
info["height"] = height
if "height" not in info:
try:
info["height"] = len(a)
except TypeError:
raise ProtocolError("len(a) does not work, supply info['height'] instead.")
planes = len(mode)
if "planes" in info:
if info["planes"] != planes:
raise Error("info['planes'] should match mode.")
# In order to work out whether we the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a, t = itertools.tee(a)
row = next(t)
del t
testelement = row
if "width" not in info:
width = len(row) // planes
info["width"] = width
if "bitdepth" not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's datatype,
# use a default of 8.
bitdepth = 8
else:
# If we got here without exception,
# we now assume that the array is a numpy array.
if dtype.kind == "b":
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info["bitdepth"] = bitdepth
for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
assert thing in info
return Image(a, info)
# So that refugee's from PIL feel more at home. Not documented.
fromarray = from_array
| Writer |
python | django__django | django/http/response.py | {
"start": 18828,
"end": 22160
} | class ____(StreamingHttpResponse):
"""
A streaming HTTP response class optimized for files.
"""
block_size = 4096
def __init__(self, *args, as_attachment=False, filename="", **kwargs):
self.as_attachment = as_attachment
self.filename = filename
self._no_explicit_content_type = (
"content_type" not in kwargs or kwargs["content_type"] is None
)
super().__init__(*args, **kwargs)
def _set_streaming_content(self, value):
if not hasattr(value, "read"):
self.file_to_stream = None
return super()._set_streaming_content(value)
self.file_to_stream = filelike = value
if hasattr(filelike, "close"):
self._resource_closers.append(filelike.close)
value = iter(lambda: filelike.read(self.block_size), b"")
self.set_headers(filelike)
super()._set_streaming_content(value)
def set_headers(self, filelike):
"""
Set some common response headers (Content-Length, Content-Type, and
Content-Disposition) based on the `filelike` response content.
"""
filename = getattr(filelike, "name", "")
filename = filename if isinstance(filename, str) else ""
seekable = hasattr(filelike, "seek") and (
not hasattr(filelike, "seekable") or filelike.seekable()
)
if hasattr(filelike, "tell"):
if seekable:
initial_position = filelike.tell()
filelike.seek(0, io.SEEK_END)
self.headers["Content-Length"] = filelike.tell() - initial_position
filelike.seek(initial_position)
elif hasattr(filelike, "getbuffer"):
self.headers["Content-Length"] = (
filelike.getbuffer().nbytes - filelike.tell()
)
elif os.path.exists(filename):
self.headers["Content-Length"] = (
os.path.getsize(filename) - filelike.tell()
)
elif seekable:
self.headers["Content-Length"] = sum(
iter(lambda: len(filelike.read(self.block_size)), 0)
)
filelike.seek(-int(self.headers["Content-Length"]), io.SEEK_END)
filename = os.path.basename(self.filename or filename)
if self._no_explicit_content_type:
if filename:
content_type, encoding = mimetypes.guess_type(filename)
# Encoding isn't set to prevent browsers from automatically
# uncompressing files.
content_type = {
"br": "application/x-brotli",
"bzip2": "application/x-bzip",
"compress": "application/x-compress",
"gzip": "application/gzip",
"xz": "application/x-xz",
}.get(encoding, content_type)
self.headers["Content-Type"] = (
content_type or "application/octet-stream"
)
else:
self.headers["Content-Type"] = "application/octet-stream"
if content_disposition := content_disposition_header(
self.as_attachment, filename
):
self.headers["Content-Disposition"] = content_disposition
| FileResponse |
python | huggingface__transformers | src/transformers/models/mistral3/modeling_mistral3.py | {
"start": 8511,
"end": 16071
} | class ____(Mistral3PreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
def __init__(self, config: Mistral3Config):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = Mistral3MultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_sizes (`torch.Tensor`, *optional*):
Tensor containing the image sizes as returned by the processor.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
# this is not memory efficient at all (output_hidden_states=True) will save all the hidden states.
image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
else:
hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
selected_image_feature = torch.cat(hs_pool, dim=-1)
image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes)
downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size
split_sizes = [(height // downsample_ratio) * (width // downsample_ratio) for height, width in image_sizes]
image_features = torch.split(image_features.squeeze(0), split_sizes)
return image_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Mistral3ModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
image_sizes=image_sizes,
)
image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return Mistral3ModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
The MISTRAL3 model which consists of a vision backbone and a language model.
"""
)
| Mistral3Model |
python | agronholm__apscheduler | src/apscheduler/eventbrokers/asyncpg.py | {
"start": 792,
"end": 6381
} | class ____(BaseExternalEventBroker):
"""
An asynchronous, asyncpg_ based event broker that uses a PostgreSQL server to
broadcast events using its ``NOTIFY`` mechanism.
.. _asyncpg: https://pypi.org/project/asyncpg/
:param dsn: a libpq connection string (e.g.
``postgres://user:pass@host:port/dbname``)
:param options: extra keyword arguments passed to :func:`asyncpg.connection.connect`
:param channel: the ``NOTIFY`` channel to use
:param max_idle_time: maximum time to let the connection go idle, before sending a
``SELECT 1`` query to prevent a connection timeout
"""
dsn: str
options: Mapping[str, Any] = attrs.field(
factory=dict, validator=instance_of(Mapping)
)
channel: str = attrs.field(kw_only=True, default="apscheduler")
max_idle_time: float = attrs.field(kw_only=True, default=10)
_send: MemoryObjectSendStream[str] = attrs.field(init=False)
@classmethod
def from_async_sqla_engine(
cls,
engine: AsyncEngine,
options: Mapping[str, Any] | None = None,
**kwargs: Any,
) -> AsyncpgEventBroker:
"""
Create a new asyncpg event broker from an SQLAlchemy engine.
The engine will only be used to create the appropriate options for
:func:`asyncpg.connection.connect`.
:param engine: an asynchronous SQLAlchemy engine using asyncpg as the driver
:type engine: ~sqlalchemy.ext.asyncio.AsyncEngine
:param options: extra keyword arguments passed to
:func:`asyncpg.connection.connect`
:param kwargs: keyword arguments to pass to the initializer of this class
:return: the newly created event broker
"""
if engine.dialect.driver != "asyncpg":
raise ValueError(
f'The driver in the engine must be "asyncpg" (current: '
f"{engine.dialect.driver})"
)
dsn = engine.url.render_as_string(hide_password=False).replace("+asyncpg", "")
return cls(dsn, options or {}, **kwargs)
def __repr__(self) -> str:
return create_repr(self, "dsn")
@property
def _temporary_failure_exceptions(self) -> tuple[type[Exception], ...]:
return OSError, InterfaceError
@asynccontextmanager
async def _connect(self) -> AsyncGenerator[asyncpg.Connection, None]:
async for attempt in self._retry():
with attempt:
conn = await asyncpg.connect(self.dsn, **self.options)
try:
yield conn
finally:
with move_on_after(5, shield=True):
await conn.close(timeout=3)
async def start(self, exit_stack: AsyncExitStack, logger: Logger) -> None:
await super().start(exit_stack, logger)
self._send, receive = create_memory_object_stream[str](100)
await exit_stack.enter_async_context(self._send)
await self._task_group.start(self._listen_notifications, receive)
async def _listen_notifications(
self, receive: MemoryObjectReceiveStream[str], *, task_status: TaskStatus[None]
) -> None:
conn: Connection
def listen_callback(
connection: Connection, pid: int, channel: str, payload: str
) -> None:
event = self.reconstitute_event_str(payload)
if event is not None:
self._task_group.start_soon(self.publish_local, event)
async def unsubscribe() -> None:
if not conn.is_closed():
with move_on_after(3, shield=True):
await conn.remove_listener(self.channel, listen_callback)
task_started_sent = False
with receive:
while True:
async with AsyncExitStack() as exit_stack:
conn = await exit_stack.enter_async_context(self._connect())
self._logger.info("Connection established")
try:
await conn.add_listener(self.channel, listen_callback)
exit_stack.push_async_callback(unsubscribe)
if not task_started_sent:
task_status.started()
task_started_sent = True
while True:
notification: str | None = None
with move_on_after(self.max_idle_time):
try:
notification = await receive.receive()
except EndOfStream:
self._logger.info("Stream finished")
return
if notification:
await conn.execute(
"SELECT pg_notify($1, $2)",
self.channel,
notification,
)
else:
await conn.execute("SELECT 1")
except InterfaceError as exc:
self._logger.error("Connection error: %s", exc)
async def publish(self, event: Event) -> None:
notification = self.generate_notification_str(event)
if len(notification) > 7999:
raise SerializationError(
"Serialized event object exceeds 7999 bytes in size"
)
await self._send.send(notification)
| AsyncpgEventBroker |
python | jazzband__django-pipeline | tests/tests/test_glob.py | {
"start": 271,
"end": 3621
} | class ____(TestCase):
def normpath(self, *parts):
return os.path.normpath(os.path.join(*parts))
def mktemp(self, *parts):
filename = self.normpath(*parts)
base, file = os.path.split(filename)
base = os.path.join(self.storage.location, base)
if not os.path.exists(base):
os.makedirs(base)
self.storage.save(filename, ContentFile(""))
def assertSequenceEqual(self, l1, l2):
self.assertEqual(set(l1), set(l2))
def setUp(self):
self.storage = FileSystemStorage(local_path("glob_dir"))
self.old_storage = glob.staticfiles_storage
glob.staticfiles_storage = self.storage
self.mktemp("a", "D")
self.mktemp("aab", "F")
self.mktemp("aaa", "zzzF")
self.mktemp("ZZZ")
self.mktemp("a", "bcd", "EF")
self.mktemp("a", "bcd", "efg", "ha")
def glob(self, *parts):
if len(parts) == 1:
pattern = parts[0]
else:
pattern = os.path.join(*parts)
return glob.glob(pattern)
def tearDown(self):
shutil.rmtree(self.storage.location)
glob.staticfiles_storage = self.old_storage
def test_glob_literal(self):
self.assertSequenceEqual(self.glob("a"), [self.normpath("a")])
self.assertSequenceEqual(self.glob("a", "D"), [self.normpath("a", "D")])
self.assertSequenceEqual(self.glob("aab"), [self.normpath("aab")])
def test_glob_one_directory(self):
self.assertSequenceEqual(
self.glob("a*"), map(self.normpath, ["a", "aab", "aaa"])
)
self.assertSequenceEqual(self.glob("*a"), map(self.normpath, ["a", "aaa"]))
self.assertSequenceEqual(self.glob("aa?"), map(self.normpath, ["aaa", "aab"]))
self.assertSequenceEqual(
self.glob("aa[ab]"), map(self.normpath, ["aaa", "aab"])
)
self.assertSequenceEqual(self.glob("*q"), [])
def test_glob_nested_directory(self):
if os.path.normcase("abCD") == "abCD":
# case-sensitive filesystem
self.assertSequenceEqual(
self.glob("a", "bcd", "E*"), [self.normpath("a", "bcd", "EF")]
)
else:
# case insensitive filesystem
self.assertSequenceEqual(
self.glob("a", "bcd", "E*"),
[self.normpath("a", "bcd", "EF"), self.normpath("a", "bcd", "efg")],
)
self.assertSequenceEqual(
self.glob("a", "bcd", "*g"), [self.normpath("a", "bcd", "efg")]
)
def test_glob_directory_names(self):
self.assertSequenceEqual(self.glob("*", "D"), [self.normpath("a", "D")])
self.assertSequenceEqual(self.glob("*", "*a"), [])
self.assertSequenceEqual(
self.glob("a", "*", "*", "*a"), [self.normpath("a", "bcd", "efg", "ha")]
)
self.assertSequenceEqual(
self.glob("?a?", "*F"),
map(self.normpath, [os.path.join("aaa", "zzzF"), os.path.join("aab", "F")]),
)
def test_glob_directory_with_trailing_slash(self):
# We are verifying that when there is wildcard pattern which
# ends with os.sep doesn't blow up.
paths = glob.glob("*" + os.sep)
self.assertEqual(len(paths), 4)
self.assertTrue(all(os.sep in path for path in paths))
| GlobTest |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 142506,
"end": 144829
} | class ____(Response):
"""
Response of projects.merge endpoint.
:param moved_entities: The number of tasks and models moved from the merged
project into the destination
:type moved_entities: int
:param moved_projects: The number of child projects moved from the merged
project into the destination
:type moved_projects: int
"""
_service = "projects"
_action = "merge"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"moved_entities": {
"description": "The number of tasks, models and dataviews moved from the merged project into the destination",
"type": ["integer", "null"],
},
"moved_projects": {
"description": "The number of child projects moved from the merged project into the destination",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self, moved_entities: Optional[int] = None, moved_projects: Optional[int] = None, **kwargs: Any
) -> None:
super(MergeResponse, self).__init__(**kwargs)
self.moved_entities = moved_entities
self.moved_projects = moved_projects
@schema_property("moved_entities")
def moved_entities(self) -> Optional[int]:
return self._property_moved_entities
@moved_entities.setter
def moved_entities(self, value: Optional[int]) -> None:
if value is None:
self._property_moved_entities = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "moved_entities", six.integer_types)
self._property_moved_entities = value
@schema_property("moved_projects")
def moved_projects(self) -> Optional[int]:
return self._property_moved_projects
@moved_projects.setter
def moved_projects(self, value: Optional[int]) -> None:
if value is None:
self._property_moved_projects = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "moved_projects", six.integer_types)
self._property_moved_projects = value
| MergeResponse |
python | pandas-dev__pandas | pandas/tests/io/parser/conftest.py | {
"start": 2058,
"end": 2163
} | class ____(BaseParser):
engine = "c"
float_precision_choices = [None, "high", "round_trip"]
| CParser |
python | walkccc__LeetCode | solutions/447. Number of Boomerangs/447.py | {
"start": 0,
"end": 301
} | class ____:
def numberOfBoomerangs(self, points: list[list[int]]) -> int:
ans = 0
for x1, y1 in points:
count = collections.Counter()
for x2, y2 in points:
ans += 2 * count[(x1 - x2)**2 + (y1 - y2)**2]
count[(x1 - x2)**2 + (y1 - y2)**2] += 1
return ans
| Solution |
python | walkccc__LeetCode | solutions/2807. Insert Greatest Common Divisors in Linked List/2807.py | {
"start": 0,
"end": 289
} | class ____:
def insertGreatestCommonDivisors(
self, head: ListNode | None
) -> ListNode | None:
curr = head
while curr.next:
inserted = ListNode(math.gcd(curr.val, curr.next.val), curr.next)
curr.next = inserted
curr = inserted.next
return head
| Solution |
python | kevin1024__vcrpy | vcr/config.py | {
"start": 393,
"end": 11014
} | class ____:
@staticmethod
def is_test_method(method_name, function):
return method_name.startswith("test") and isinstance(function, types.FunctionType)
@staticmethod
def ensure_suffix(suffix):
def ensure(path):
if not path.endswith(suffix):
return path + suffix
return path
return ensure
def __init__(
self,
path_transformer=None,
before_record_request=None,
custom_patches=(),
filter_query_parameters=(),
ignore_hosts=(),
record_mode=RecordMode.ONCE,
ignore_localhost=False,
filter_headers=(),
before_record_response=None,
filter_post_data_parameters=(),
match_on=("method", "scheme", "host", "port", "path", "query"),
before_record=None,
inject_cassette=False,
serializer="yaml",
cassette_library_dir=None,
func_path_generator=None,
decode_compressed_response=False,
record_on_exception=True,
drop_unused_requests=False,
):
self.serializer = serializer
self.match_on = match_on
self.cassette_library_dir = cassette_library_dir
self.serializers = {"yaml": yamlserializer, "json": jsonserializer}
self.matchers = {
"method": matchers.method,
"uri": matchers.uri,
"url": matchers.uri, # matcher for backwards compatibility
"scheme": matchers.scheme,
"host": matchers.host,
"port": matchers.port,
"path": matchers.path,
"query": matchers.query,
"headers": matchers.headers,
"raw_body": matchers.raw_body,
"body": matchers.body,
}
self.persister = FilesystemPersister
self.record_mode = record_mode
self.filter_headers = filter_headers
self.filter_query_parameters = filter_query_parameters
self.filter_post_data_parameters = filter_post_data_parameters
self.before_record_request = before_record_request or before_record
self.before_record_response = before_record_response
self.ignore_hosts = ignore_hosts
self.ignore_localhost = ignore_localhost
self.inject_cassette = inject_cassette
self.path_transformer = path_transformer
self.func_path_generator = func_path_generator
self.decode_compressed_response = decode_compressed_response
self.record_on_exception = record_on_exception
self._custom_patches = tuple(custom_patches)
self.drop_unused_requests = drop_unused_requests
def _get_serializer(self, serializer_name):
try:
serializer = self.serializers[serializer_name]
except KeyError:
raise KeyError(f"Serializer {serializer_name} doesn't exist or isn't registered") from None
return serializer
def _get_matchers(self, matcher_names):
matchers = []
try:
for m in matcher_names:
matchers.append(self.matchers[m])
except KeyError:
raise KeyError(f"Matcher {m} doesn't exist or isn't registered") from None
return matchers
def use_cassette(self, path=None, **kwargs):
if path is not None and not isinstance(path, (str, Path)):
function = path
# Assume this is an attempt to decorate a function
return self._use_cassette(**kwargs)(function)
return self._use_cassette(path=path, **kwargs)
def _use_cassette(self, with_current_defaults=False, **kwargs):
if with_current_defaults:
config = self.get_merged_config(**kwargs)
return Cassette.use(**config)
# This is made a function that evaluates every time a cassette
# is made so that changes that are made to this VCR instance
# that occur AFTER the `use_cassette` decorator is applied
# still affect subsequent calls to the decorated function.
args_getter = functools.partial(self.get_merged_config, **kwargs)
return Cassette.use_arg_getter(args_getter)
def get_merged_config(self, **kwargs):
serializer_name = kwargs.get("serializer", self.serializer)
matcher_names = kwargs.get("match_on", self.match_on)
path_transformer = kwargs.get("path_transformer", self.path_transformer)
func_path_generator = kwargs.get("func_path_generator", self.func_path_generator)
cassette_library_dir = kwargs.get("cassette_library_dir", self.cassette_library_dir)
additional_matchers = kwargs.get("additional_matchers", ())
record_on_exception = kwargs.get("record_on_exception", self.record_on_exception)
if cassette_library_dir:
def add_cassette_library_dir(path):
if not path.startswith(cassette_library_dir):
return os.path.join(cassette_library_dir, path)
return path
path_transformer = compose(add_cassette_library_dir, path_transformer)
elif not func_path_generator:
# If we don't have a library dir, use the functions
# location to build a full path for cassettes.
func_path_generator = self._build_path_from_func_using_module
merged_config = {
"serializer": self._get_serializer(serializer_name),
"persister": self.persister,
"match_on": self._get_matchers(tuple(matcher_names) + tuple(additional_matchers)),
"record_mode": kwargs.get("record_mode", self.record_mode),
"before_record_request": self._build_before_record_request(kwargs),
"before_record_response": self._build_before_record_response(kwargs),
"custom_patches": self._custom_patches + kwargs.get("custom_patches", ()),
"inject": kwargs.get("inject_cassette", self.inject_cassette),
"path_transformer": path_transformer,
"func_path_generator": func_path_generator,
"allow_playback_repeats": kwargs.get("allow_playback_repeats", False),
"record_on_exception": record_on_exception,
"drop_unused_requests": kwargs.get("drop_unused_requests", self.drop_unused_requests),
}
path = kwargs.get("path")
if path:
merged_config["path"] = path
return merged_config
def _build_before_record_response(self, options):
before_record_response = options.get("before_record_response", self.before_record_response)
decode_compressed_response = options.get(
"decode_compressed_response",
self.decode_compressed_response,
)
filter_functions = []
if decode_compressed_response:
filter_functions.append(filters.decode_response)
if before_record_response:
if not isinstance(before_record_response, collections_abc.Iterable):
before_record_response = (before_record_response,)
filter_functions.extend(before_record_response)
def before_record_response(response):
for function in filter_functions:
if response is None:
break
response = function(response)
return response
return before_record_response
def _build_before_record_request(self, options):
filter_functions = []
filter_headers = options.get("filter_headers", self.filter_headers)
filter_query_parameters = options.get("filter_query_parameters", self.filter_query_parameters)
filter_post_data_parameters = options.get(
"filter_post_data_parameters",
self.filter_post_data_parameters,
)
before_record_request = options.get(
"before_record_request",
options.get("before_record", self.before_record_request),
)
ignore_hosts = options.get("ignore_hosts", self.ignore_hosts)
ignore_localhost = options.get("ignore_localhost", self.ignore_localhost)
if filter_headers:
replacements = [h if isinstance(h, tuple) else (h, None) for h in filter_headers]
filter_functions.append(functools.partial(filters.replace_headers, replacements=replacements))
if filter_query_parameters:
replacements = [p if isinstance(p, tuple) else (p, None) for p in filter_query_parameters]
filter_functions.append(
functools.partial(filters.replace_query_parameters, replacements=replacements),
)
if filter_post_data_parameters:
replacements = [p if isinstance(p, tuple) else (p, None) for p in filter_post_data_parameters]
filter_functions.append(
functools.partial(filters.replace_post_data_parameters, replacements=replacements),
)
hosts_to_ignore = set(ignore_hosts)
if ignore_localhost:
hosts_to_ignore.update(("localhost", "0.0.0.0", "127.0.0.1"))
if hosts_to_ignore:
filter_functions.append(self._build_ignore_hosts(hosts_to_ignore))
if before_record_request:
if not isinstance(before_record_request, collections_abc.Iterable):
before_record_request = (before_record_request,)
filter_functions.extend(before_record_request)
def before_record_request(request):
request = copy.deepcopy(request)
for function in filter_functions:
if request is None:
break
request = function(request)
return request
return before_record_request
@staticmethod
def _build_ignore_hosts(hosts_to_ignore):
def filter_ignored_hosts(request):
if hasattr(request, "host") and request.host in hosts_to_ignore:
return
return request
return filter_ignored_hosts
@staticmethod
def _build_path_from_func_using_module(function):
return os.path.join(os.path.dirname(inspect.getfile(function)), function.__name__)
def register_serializer(self, name, serializer):
self.serializers[name] = serializer
def register_matcher(self, name, matcher):
self.matchers[name] = matcher
def register_persister(self, persister):
# Singleton, no name required
self.persister = persister
def test_case(self, predicate=None):
predicate = predicate or self.is_test_method
metaclass = auto_decorate(self.use_cassette, predicate)
return metaclass("temporary_class", (), {})
| VCR |
python | wandb__wandb | wandb/automations/events.py | {
"start": 3465,
"end": 3938
} | class ____(GQLBase): # from: RunMetricFilter
event_type: Annotated[
Literal[EventType.RUN_METRIC_ZSCORE],
Field(exclude=True, repr=False),
] = EventType.RUN_METRIC_ZSCORE
zscore_filter: MetricZScoreFilter
@model_validator(mode="before")
@classmethod
def _nest_inner_filter(cls, v: Any) -> Any:
if pydantic_isinstance(v, MetricZScoreFilter):
return cls(zscore_filter=v)
return v
| _WrappedMetricZScoreFilter |
python | python-attrs__attrs | bench/test_benchmarks.py | {
"start": 2499,
"end": 2968
} | class ____:
a: int = 0
b: Ellipsis = ...
c: str = "foo"
d: tuple[str] = "bar"
e: complex = complex()
def test_asdict_atomic():
"""
Benchmark atomic-only instances.
"""
c = AtomicFields()
ad = attrs.asdict
for _ in range(ROUNDS):
ad(c)
def test_astuple_atomic():
"""
Benchmark atomic-only instances.
"""
c = AtomicFields()
at = attrs.astuple
for _ in range(ROUNDS):
at(c)
| AtomicFields |
python | apache__airflow | providers/apache/spark/src/airflow/providers/apache/spark/hooks/spark_sql.py | {
"start": 1193,
"end": 8269
} | class ____(BaseHook):
"""
This hook is a wrapper around the spark-sql binary; requires the "spark-sql" binary to be in the PATH.
:param sql: The SQL query to execute
:param conf: arbitrary Spark configuration property
:param conn_id: connection_id string
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:param executor_cores: (Standalone & YARN only) Number of cores per
executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab
:param master: spark://host:port, mesos://host:port, yarn, or local
(Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:param name: Name of the job.
:param num_executors: Number of executors to launch
:param verbose: Whether to pass the verbose flag to spark-sql
:param yarn_queue: The YARN queue to submit to
(Default: The ``queue`` value set in the Connection, or ``"default"``)
"""
conn_name_attr = "conn_id"
default_conn_name = "spark_sql_default"
conn_type = "spark_sql"
hook_name = "Spark SQL"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for Spark SQL connection."""
return {
"hidden_fields": ["schema", "login", "password", "extra"],
"relabeling": {},
}
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to Spark SQL connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
from wtforms.validators import Optional
return {
"queue": StringField(
lazy_gettext("YARN queue"),
widget=BS3TextFieldWidget(),
description="Default YARN queue to use",
validators=[Optional()],
)
}
def __init__(
self,
sql: str,
conf: dict[str, Any] | str | None = None,
conn_id: str = default_conn_name,
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
master: str | None = None,
name: str = "default-name",
num_executors: int | None = None,
verbose: bool = True,
yarn_queue: str | None = None,
) -> None:
super().__init__()
options: dict = {}
conn: Connection | None = None
try:
conn = self.get_connection(conn_id)
except AirflowNotFoundException:
conn = None
if conn:
options = conn.extra_dejson
# Set arguments to values set in Connection if not explicitly provided.
if master is None:
if conn is None:
master = "yarn"
elif conn.port:
master = f"{conn.host}:{conn.port}"
else:
master = conn.host
if yarn_queue is None:
yarn_queue = options.get("queue", "default")
self._sql = sql
self._conf = conf
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._sp: Any = None
def get_conn(self) -> Any:
pass
def _prepare_command(self, cmd: str | list[str]) -> list[str]:
"""
Construct the spark-sql command to execute. Verbose output is enabled as default.
:param cmd: command to append to the spark-sql command
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
conf = self._conf
if isinstance(conf, dict):
for key, value in conf.items():
connection_cmd += ["--conf", f"{key}={value}"]
elif isinstance(conf, str):
for conf_el in conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith((".sql", ".hql")):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
if isinstance(cmd, str):
connection_cmd += cmd.split()
elif isinstance(cmd, list):
connection_cmd += cmd
else:
raise AirflowException(f"Invalid additional command: {cmd}")
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
def run_query(self, cmd: str = "", **kwargs: Any) -> None:
"""
Remote Popen (actually execute the Spark-sql query).
:param cmd: command to append to the spark-sql command
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(
spark_sql_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, **kwargs
)
for line in iter(self._sp.stdout):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
f"Cannot execute '{self._sql}' on {self._master} (additional parameters: '{cmd}'). "
f"Process exit code: {returncode}."
)
def kill(self) -> None:
"""Kill Spark job."""
if self._sp and self._sp.poll() is None:
self.log.info("Killing the Spark-Sql job")
self._sp.kill()
| SparkSqlHook |
python | sqlalchemy__sqlalchemy | test/ext/test_orderinglist.py | {
"start": 13951,
"end": 14078
} | class ____:
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
| MockIndex |
python | walkccc__LeetCode | solutions/3291. Minimum Number of Valid Strings to Form Target I/3291.py | {
"start": 0,
"end": 1047
} | class ____:
def minValidStrings(self, words: list[str], target: str) -> int:
ans = 0
unmatchedPrefix = len(target)
lpsList = [self._getLPS(word + '#' + target) for word in words]
while unmatchedPrefix > 0:
# Greedily choose the word that has the longest suffix match with the
# remaining unmatched prefix.
maxMatchSuffix = 0
for lps, word in zip(lpsList, words):
maxMatchSuffix = max(maxMatchSuffix, lps[len(word) + unmatchedPrefix])
if maxMatchSuffix == 0:
return -1
ans += 1
unmatchedPrefix -= maxMatchSuffix
return ans
def _getLPS(self, pattern: str) -> list[int]:
"""
Returns the lps array, where lps[i] is the length of the longest prefix of
pattern[0..i] which is also a suffix of this substring.
"""
lps = [0] * len(pattern)
j = 0
for i in range(1, len(pattern)):
while j > 0 and pattern[j] != pattern[i]:
j = lps[j - 1]
if pattern[i] == pattern[j]:
lps[i] = j + 1
j += 1
return lps
| Solution |
python | getsentry__sentry | tests/apidocs/endpoints/events/test_project_event_details.py | {
"start": 136,
"end": 917
} | class ____(APIDocsTestCase):
endpoint = "sentry-api-0-project-event-details"
def setUp(self) -> None:
self.create_event("a")
event = self.create_event("b")
self.create_event("c")
self.create_event("d", fingerprint=["group-2"])
self.url = reverse(
self.endpoint,
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event.event_id,
},
)
self.login_as(user=self.user)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
| ProjectEventDetailsDocs |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 16692,
"end": 16750
} | class ____(SingleAggregation):
groupby_chunk = M.max
| Max |
python | huggingface__transformers | tests/models/hgnet_v2/test_modeling_hgnet_v2.py | {
"start": 6417,
"end": 9744
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some tests of test_modeling_common.py, as TextNet does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (HGNetV2ForImageClassification, HGNetV2Backbone) if is_torch_available() else ()
pipeline_model_mapping = {"image-classification": HGNetV2ForImageClassification} if is_torch_available() else {}
test_resize_embeddings = False
test_torch_exportable = True
has_attentions = False
def setUp(self):
self.model_tester = HGNetV2ModelTester(self)
@unittest.skip(reason="HGNetV2 does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="HGNetV2 does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="HGNetV2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="HGNetV2 does not support input and output embeddings")
def test_model_common_attributes(self):
pass
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
self.assertEqual(len(hidden_states), self.model_tester.num_stages + 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Retain_grad is not supposed to be tested")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="TextNet does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="HGNetV2 does not use model")
def test_model_from_pretrained(self):
pass
| HGNetV2ForImageClassificationTest |
python | walkccc__LeetCode | solutions/2003. Smallest Missing Genetic Value in Each Subtree/2003.py | {
"start": 0,
"end": 947
} | class ____:
def smallestMissingValueSubtree(
self,
parents: list[int],
nums: list[int],
) -> list[int]:
n = len(parents)
ans = [1] * n
tree = [[] for _ in range(n)]
seen = set()
minMiss = 1
for i in range(1, n):
tree[parents[i]].append(i)
def getNode(nums: list[int]) -> int:
for i, num in enumerate(nums):
if num == 1:
return i
return -1
nodeThatsOne = getNode(nums)
if nodeThatsOne == -1:
return ans
u = nodeThatsOne
prev = -1 # the u that just handled
def dfs(u: int) -> None:
seen.add(nums[u])
for v in tree[u]:
dfs(v)
# Upward from `nodeThatsOne` to the root `u`.
while u != -1:
for v in tree[u]:
if v != prev:
dfs(v)
seen.add(nums[u])
while minMiss in seen:
minMiss += 1
ans[u] = minMiss
prev = u
u = parents[u]
return ans
| Solution |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 102357,
"end": 103453
} | class ____:
xlRDIAll = 99 # from enum XlRemoveDocInfoType
xlRDIComments = 1 # from enum XlRemoveDocInfoType
xlRDIContentType = 16 # from enum XlRemoveDocInfoType
xlRDIDefinedNameComments = 18 # from enum XlRemoveDocInfoType
xlRDIDocumentManagementPolicy = 15 # from enum XlRemoveDocInfoType
xlRDIDocumentProperties = 8 # from enum XlRemoveDocInfoType
xlRDIDocumentServerProperties = 14 # from enum XlRemoveDocInfoType
xlRDIDocumentWorkspace = 10 # from enum XlRemoveDocInfoType
xlRDIEmailHeader = 5 # from enum XlRemoveDocInfoType
xlRDIInactiveDataConnections = 19 # from enum XlRemoveDocInfoType
xlRDIInkAnnotations = 11 # from enum XlRemoveDocInfoType
xlRDIPrinterPath = 20 # from enum XlRemoveDocInfoType
xlRDIPublishInfo = 13 # from enum XlRemoveDocInfoType
xlRDIRemovePersonalInformation = 4 # from enum XlRemoveDocInfoType
xlRDIRoutingSlip = 6 # from enum XlRemoveDocInfoType
xlRDIScenarioComments = 12 # from enum XlRemoveDocInfoType
xlRDISendForReview = 7 # from enum XlRemoveDocInfoType
| RemoveDocInfoType |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/unitofwork.py | {
"start": 19433,
"end": 20973
} | class ____(_IterateMappersMixin, _PostSortRec):
__slots__ = "dependency_processor", "isdelete", "fromparent", "sort_key"
def __init__(self, uow, dependency_processor, isdelete, fromparent):
self.dependency_processor = dependency_processor
self.sort_key = (
"ProcessAll",
self.dependency_processor.sort_key,
isdelete,
)
self.isdelete = isdelete
self.fromparent = fromparent
uow.deps[dependency_processor.parent.base_mapper].add(
dependency_processor
)
def execute(self, uow):
states = self._elements(uow)
if self.isdelete:
self.dependency_processor.process_deletes(uow, states)
else:
self.dependency_processor.process_saves(uow, states)
def per_state_flush_actions(self, uow):
# this is handled by SaveUpdateAll and DeleteAll,
# since a ProcessAll should unconditionally be pulled
# into per-state if either the parent/child mappers
# are part of a cycle
return iter([])
def __repr__(self):
return "%s(%s, isdelete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
self.isdelete,
)
def _elements(self, uow):
for mapper in self._mappers(uow):
for state in uow.mappers[mapper]:
(isdelete, listonly) = uow.states[state]
if isdelete == self.isdelete and not listonly:
yield state
| _ProcessAll |
python | TheAlgorithms__Python | scheduling/job_sequence_with_deadline.py | {
"start": 704,
"end": 1895
} | class ____:
task_id: int
deadline: int
reward: int
def max_tasks(tasks_info: list[tuple[int, int]]) -> list[int]:
"""
Create a list of Task objects that are sorted so the highest rewards come first.
Return a list of those task ids that can be completed before i becomes too high.
>>> max_tasks([(4, 20), (1, 10), (1, 40), (1, 30)])
[2, 0]
>>> max_tasks([(1, 10), (2, 20), (3, 30), (2, 40)])
[3, 2]
>>> max_tasks([(9, 10)])
[0]
>>> max_tasks([(-9, 10)])
[]
>>> max_tasks([])
[]
>>> max_tasks([(0, 10), (0, 20), (0, 30), (0, 40)])
[]
>>> max_tasks([(-1, 10), (-2, 20), (-3, 30), (-4, 40)])
[]
"""
tasks = sorted(
(
Task(task_id, deadline, reward)
for task_id, (deadline, reward) in enumerate(tasks_info)
),
key=attrgetter("reward"),
reverse=True,
)
return [task.task_id for i, task in enumerate(tasks, start=1) if task.deadline >= i]
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{max_tasks([(4, 20), (1, 10), (1, 40), (1, 30)]) = }")
print(f"{max_tasks([(1, 10), (2, 20), (3, 30), (2, 40)]) = }")
| Task |
python | scrapy__scrapy | tests/test_downloadermiddleware_httpauth.py | {
"start": 267,
"end": 378
} | class ____(Spider):
http_user = "foo"
http_pass = "bar"
http_auth_domain = "example.com"
| DomainSpider |
python | apache__avro | lang/py/avro/test/test_io.py | {
"start": 20076,
"end": 27082
} | class ____(unittest.TestCase):
def test_decimal_bytes_small_scale(self) -> None:
"""Avro should raise an AvroTypeException when attempting to write a decimal with a larger exponent than the schema's scale."""
datum = decimal.Decimal("3.1415")
_, _, exp = datum.as_tuple()
scale = -1 * int(exp) - 1
schema = avro.schema.parse(
json.dumps(
{
"type": "bytes",
"logicalType": "decimal",
"precision": 5,
"scale": scale,
}
)
)
self.assertRaises(avro.errors.AvroOutOfScaleException, write_datum, datum, schema)
def test_decimal_fixed_small_scale(self) -> None:
"""Avro should raise an AvroTypeException when attempting to write a decimal with a larger exponent than the schema's scale."""
datum = decimal.Decimal("3.1415")
_, _, exp = datum.as_tuple()
scale = -1 * int(exp) - 1
schema = avro.schema.parse(
json.dumps(
{
"type": "fixed",
"logicalType": "decimal",
"name": "Test",
"size": 8,
"precision": 5,
"scale": scale,
}
)
)
self.assertRaises(avro.errors.AvroOutOfScaleException, write_datum, datum, schema)
def test_unknown_symbol(self) -> None:
datum_to_write = "FOO"
writers_schema = avro.schema.parse(json.dumps({"type": "enum", "name": "Test", "symbols": ["FOO", "BAR"]}))
readers_schema = avro.schema.parse(json.dumps({"type": "enum", "name": "Test", "symbols": ["BAR", "BAZ"]}))
writer, encoder, datum_writer = write_datum(datum_to_write, writers_schema)
reader = io.BytesIO(writer.getvalue())
decoder = avro.io.BinaryDecoder(reader)
datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
self.assertRaises(avro.errors.SchemaResolutionException, datum_reader.read, decoder)
def test_no_default_value(self) -> None:
writers_schema = LONG_RECORD_SCHEMA
datum_to_write = LONG_RECORD_DATUM
readers_schema = avro.schema.parse(
json.dumps(
{
"type": "record",
"name": "Test",
"fields": [{"name": "H", "type": "int"}],
}
)
)
writer, encoder, datum_writer = write_datum(datum_to_write, writers_schema)
reader = io.BytesIO(writer.getvalue())
decoder = avro.io.BinaryDecoder(reader)
datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
self.assertRaises(avro.errors.SchemaResolutionException, datum_reader.read, decoder)
def test_projection(self) -> None:
writers_schema = LONG_RECORD_SCHEMA
datum_to_write = LONG_RECORD_DATUM
readers_schema = avro.schema.parse(
json.dumps(
{
"type": "record",
"name": "Test",
"fields": [
{"name": "E", "type": "int"},
{"name": "F", "type": "int"},
],
}
)
)
datum_to_read = {"E": 5, "F": 6}
writer, encoder, datum_writer = write_datum(datum_to_write, writers_schema)
datum_read = read_datum(writer, writers_schema, readers_schema)
self.assertEqual(datum_to_read, datum_read)
def test_field_order(self) -> None:
writers_schema = LONG_RECORD_SCHEMA
datum_to_write = LONG_RECORD_DATUM
readers_schema = avro.schema.parse(
json.dumps(
{
"type": "record",
"name": "Test",
"fields": [
{"name": "F", "type": "int"},
{"name": "E", "type": "int"},
],
}
)
)
datum_to_read = {"E": 5, "F": 6}
writer, encoder, datum_writer = write_datum(datum_to_write, writers_schema)
datum_read = read_datum(writer, writers_schema, readers_schema)
self.assertEqual(datum_to_read, datum_read)
def test_type_exception_int(self) -> None:
writers_schema = avro.schema.parse(
json.dumps(
{
"type": "record",
"name": "Test",
"fields": [
{"name": "F", "type": "int"},
{"name": "E", "type": "int"},
],
}
)
)
datum_to_write = {"E": 5, "F": "Bad"}
with self.assertRaises(avro.errors.AvroTypeException) as exc:
write_datum(datum_to_write, writers_schema)
assert str(exc.exception) == 'The datum "Bad" provided for "F" is not an example of the schema "int"'
def test_type_exception_long(self) -> None:
writers_schema = avro.schema.parse(json.dumps({"type": "record", "name": "Test", "fields": [{"name": "foo", "type": "long"}]}))
datum_to_write = {"foo": 5.0}
with self.assertRaises(avro.errors.AvroTypeException) as exc:
write_datum(datum_to_write, writers_schema)
assert str(exc.exception) == 'The datum "5.0" provided for "foo" is not an example of the schema "long"'
def test_type_exception_record(self) -> None:
writers_schema = avro.schema.parse(json.dumps({"type": "record", "name": "Test", "fields": [{"name": "foo", "type": "long"}]}))
datum_to_write = ("foo", 5.0)
with self.assertRaisesRegex(avro.errors.AvroTypeException, r"The datum \".*\" provided for \".*\" is not an example of the schema [\s\S]*"):
write_datum(datum_to_write, writers_schema)
def load_tests(loader: unittest.TestLoader, default_tests: None, pattern: None) -> unittest.TestSuite:
"""Generate test cases across many test schema."""
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestMisc))
suite.addTests(IoValidateTestCase(schema_str, datum) for schema_str, datum in SCHEMAS_TO_VALIDATE)
suite.addTests(RoundTripTestCase(schema_str, datum) for schema_str, datum in SCHEMAS_TO_VALIDATE)
for skip in False, True:
for type_ in "int", "long":
suite.addTests(BinaryEncodingTestCase(skip, type_, datum, hex_) for datum, hex_ in BINARY_ENCODINGS)
suite.addTests(
SchemaPromotionTestCase(write_type, read_type) for write_type, read_type in itertools.combinations(("int", "long", "float", "double"), 2)
)
suite.addTests(DefaultValueTestCase(field_type, default) for field_type, default in DEFAULT_VALUE_EXAMPLES)
suite.addTests(loader.loadTestsFromTestCase(TestIncompatibleSchemaReading))
return suite
if __name__ == "__main__": # pragma: no coverage
unittest.main()
| TestMisc |
python | django__django | tests/admin_changelist/admin.py | {
"start": 1001,
"end": 1175
} | class ____(admin.ModelAdmin):
list_filter = ["child__name"]
search_fields = ["child__name", "child__age"]
list_select_related = ["child"]
| ParentAdminTwoSearchFields |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.