language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/2456. Most Popular Video Creator/2456.py | {
"start": 282,
"end": 1168
} | class ____:
def mostPopularCreator(self, creators: list[str],
ids: list[str],
views: list[int]) -> list[list[str]]:
ans = []
maxPopularity = 0
nameToCreator = {}
for name, id, view in zip(creators, ids, views):
if name not in nameToCreator:
nameToCreator[name] = Creator(view, id, view)
maxPopularity = max(maxPopularity, view)
continue
creator = nameToCreator[name]
creator.popularity += view
maxPopularity = max(maxPopularity, creator.popularity)
if (creator.maxView < view or
creator.maxView == view and creator.videoId > id):
creator.videoId = id
creator.maxView = view
for name, creator in nameToCreator.items():
if creator.popularity == maxPopularity:
ans.append([name, creator.videoId])
return ans
| Solution |
python | getsentry__sentry | src/sentry/testutils/helpers/datetime.py | {
"start": 458,
"end": 899
} | class ____:
"""Returns a distinct, increasing timestamp each time it is called."""
def __init__(self, initial=None):
self.time = initial or datetime.now(UTC)
def __call__(self):
self.time += timedelta(seconds=1)
return self.time
def freeze_time(t: str | datetime | None = None) -> time_machine.travel:
if t is None:
t = datetime.now(UTC)
return time_machine.travel(t, tick=False)
| MockClock |
python | joke2k__faker | faker/providers/automotive/az_AZ/__init__.py | {
"start": 53,
"end": 1774
} | class ____(AutoProvider):
"""Implement license formats for ``az_AZ`` locale."""
license_formats = ("##-??-###",)
ascii_uppercase_azerbaijan = "ABCDEFGHXIJKQLMNOPRSTUVYZ"
license_plate_initial_numbers = (
"01",
"02",
"03",
"04",
"05",
"06",
"07",
"08",
"09",
"10",
"90",
"11",
"12",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"27",
"28",
"29",
"30",
"31",
"32",
"33",
"34",
"35",
"36",
"37",
"38",
"39",
"40",
"41",
"42",
"43",
"44",
"45",
"46",
"47",
"48",
"49",
"50",
"51",
"52",
"53",
"54",
"55",
"56",
"57",
"58",
"59",
"60",
"61",
"62",
"63",
"64",
"65",
"66",
"67",
"68",
"69",
"70",
"71",
"72",
"77",
"85",
)
def license_plate(self) -> str:
"""Generate a license plate."""
temp = re.sub(
r"\?",
lambda x: self.random_element(self.ascii_uppercase_azerbaijan),
self.random_element(self.license_formats),
)
temp = temp.replace("##", self.random_element(self.license_plate_initial_numbers), 1)
# temp = temp.format(self.random_element(range(1, 999)))
return self.numerify(temp)
| Provider |
python | getsentry__sentry | src/sentry/integrations/models/data_forwarder.py | {
"start": 187,
"end": 1226
} | class ____(DefaultFieldsModel):
"""
Configuration for data forwarding to external services (Segment, SQS, or Splunk).
This is a general config that can apply to specific projects or all projects in an organization.
"""
__relocation_scope__ = RelocationScope.Organization
organization = FlexibleForeignKey("sentry.Organization", on_delete=models.CASCADE)
is_enabled = models.BooleanField(default=True)
enroll_new_projects = models.BooleanField(default=False)
enrolled_projects = models.ManyToManyField(
"sentry.Project", through="sentry.DataForwarderProject", related_name="data_forwarders"
)
provider = models.CharField(
max_length=64,
choices=[
("segment", "Segment"),
("sqs", "Amazon SQS"),
("splunk", "Splunk"),
],
)
config = models.JSONField(default=dict)
class Meta:
app_label = "sentry"
db_table = "sentry_dataforwarder"
unique_together = (("organization", "provider"),)
| DataForwarder |
python | jazzband__tablib | src/tablib/formats/_xlsx.py | {
"start": 423,
"end": 7068
} | class ____:
title = 'xlsx'
extensions = ('xlsx',)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is a readable excel file."""
try:
# No need to fully load the file, it should be enough to be able to
# read the manifest.
reader = ExcelReader(stream, read_only=False)
reader.read_manifest()
return True
except Exception:
return False
@classmethod
def export_set(cls, dataset, freeze_panes=True, invalid_char_subst="-",
escape=False, column_width="adaptive"):
"""Returns XLSX representation of Dataset.
If ``freeze_panes`` is True, Export will freeze panes only after first line.
If ``dataset.title`` contains characters which are
considered invalid for an XLSX file sheet name
(https://web.archive.org/web/20230323081941/https://www.excelcodex.com/2012/06/worksheets-naming-conventions/),
they will be replaced with ``invalid_char_subst``.
If ``escape`` is True, formulae will have the leading '=' character removed.
This is a security measure to prevent formulae from executing by default
in exported XLSX files.
If ``column_width`` is set to "adaptive", the column width will be set to the maximum
width of the content in each column. If it is set to an integer, the column width will be
set to that integer value. If it is set to None, the column width will be set as the
default openpyxl.Worksheet width value.
"""
wb = Workbook()
ws = wb.worksheets[0]
ws.title = (
safe_xlsx_sheet_title(dataset.title, invalid_char_subst)
if dataset.title else 'Tablib Dataset'
)
cls.dset_sheet(dataset, ws, freeze_panes=freeze_panes, escape=escape)
cls._adapt_column_width(ws, column_width)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def export_book(cls, databook, freeze_panes=True, invalid_char_subst="-",
escape=False, column_width=None):
"""Returns XLSX representation of DataBook.
See export_set().
"""
wb = Workbook()
for sheet in wb.worksheets:
wb.remove(sheet)
for i, dset in enumerate(databook._datasets):
ws = wb.create_sheet()
ws.title = (
safe_xlsx_sheet_title(dset.title, invalid_char_subst)
if dset.title else f"Sheet{i}"
)
cls.dset_sheet(dset, ws, freeze_panes=freeze_panes, escape=escape)
cls._adapt_column_width(ws, column_width)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def import_sheet(cls, dset, sheet, headers=True, skip_lines=0):
"""Populates dataset with sheet."""
dset.title = sheet.title
for i, row in enumerate(sheet.rows):
if i < skip_lines:
continue
row_vals = [c.value for c in row]
if i == skip_lines and headers:
dset.headers = row_vals
else:
if i > skip_lines and len(row_vals) < dset.width:
row_vals += [''] * (dset.width - len(row_vals))
dset.append(row_vals)
@classmethod
def import_set(cls, dset, in_stream, headers=True, read_only=True, skip_lines=0):
"""Returns databook from XLS stream."""
dset.wipe()
xls_book = load_workbook(in_stream, read_only=read_only, data_only=True)
sheet = xls_book.active
cls.import_sheet(dset, sheet, headers, skip_lines)
@classmethod
def import_book(cls, dbook, in_stream, headers=True, read_only=True):
"""Returns databook from XLS stream."""
dbook.wipe()
xls_book = load_workbook(in_stream, read_only=read_only, data_only=True)
for sheet in xls_book.worksheets:
dset = tablib.Dataset()
cls.import_sheet(dset, sheet, headers)
dbook.add_sheet(dset)
@classmethod
def dset_sheet(cls, dataset, ws, freeze_panes=True, escape=False):
"""Completes given worksheet from given Dataset."""
_package = dataset._package(dicts=False)
for i, sep in enumerate(dataset._separators):
_offset = i
_package.insert((sep[0] + _offset), (sep[1],))
bold = Font(bold=True)
wrap_text = Alignment(wrap_text=True)
for i, row in enumerate(_package):
row_number = i + 1
for j, col in enumerate(row):
col_idx = get_column_letter(j + 1)
cell = ws[f'{col_idx}{row_number}']
# bold headers
if (row_number == 1) and dataset.headers:
cell.font = bold
if freeze_panes:
# Export Freeze only after first Line
ws.freeze_panes = 'A2'
# bold separators
elif len(row) < dataset.width:
cell.font = bold
# wrap the rest
else:
if '\n' in str(col):
cell.alignment = wrap_text
try:
cell.value = col
except ValueError:
cell.value = str(col)
if escape and cell.data_type == 'f' and cell.value.startswith('='):
cell.value = cell.value.replace("=", "")
@classmethod
def _adapt_column_width(cls, worksheet, width):
if isinstance(width, str) and width != "adaptive":
msg = (
f"Invalid value for column_width: {width}. "
"Must be 'adaptive' or an integer."
)
raise ValueError(msg)
if width is None:
return
column_widths = []
if width == "adaptive":
for row in worksheet.values:
for i, cell in enumerate(row):
cell_width = len(str(cell))
if len(column_widths) > i:
if cell_width > column_widths[i]:
column_widths[i] = cell_width
else:
column_widths.append(cell_width)
else:
column_widths = [width] * worksheet.max_column
for i, column_width in enumerate(column_widths, 1): # start at 1
worksheet.column_dimensions[get_column_letter(i)].width = column_width
| XLSXFormat |
python | huggingface__transformers | src/transformers/tokenization_utils_base.py | {
"start": 4691,
"end": 5020
} | class ____(ExplicitEnum):
"""
Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
an IDE.
"""
ONLY_FIRST = "only_first"
ONLY_SECOND = "only_second"
LONGEST_FIRST = "longest_first"
DO_NOT_TRUNCATE = "do_not_truncate"
| TruncationStrategy |
python | getsentry__sentry | src/sentry/deletions/defaults/sentry_app.py | {
"start": 180,
"end": 1160
} | class ____(ModelDeletionTask[SentryApp]):
def get_child_relations(self, instance: SentryApp) -> list[BaseRelation]:
from sentry.models.apiapplication import ApiApplication
from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation
from sentry.users.models.user import User
return [
ModelRelation(SentryAppInstallation, {"sentry_app_id": instance.id}),
ModelRelation(User, {"id": instance.proxy_user_id}),
ModelRelation(ApiApplication, {"id": instance.application_id}),
]
def mark_deletion_in_progress(self, instance_list: Sequence[SentryApp]) -> None:
from sentry.constants import SentryAppStatus
for instance in instance_list:
status = getattr(instance, "status", None)
if status not in (SentryAppStatus.DELETION_IN_PROGRESS, None):
instance.update(status=SentryAppStatus.DELETION_IN_PROGRESS)
| SentryAppDeletionTask |
python | scikit-learn__scikit-learn | sklearn/_loss/link.py | {
"start": 2093,
"end": 4328
} | class ____(ABC):
"""Abstract base class for differentiable, invertible link functions.
Convention:
- link function g: raw_prediction = g(y_pred)
- inverse link h: y_pred = h(raw_prediction)
For (generalized) linear models, `raw_prediction = X @ coef` is the so
called linear predictor, and `y_pred = h(raw_prediction)` is the predicted
conditional (on X) expected value of the target `y_true`.
The methods are not implemented as staticmethods in case a link function needs
parameters.
"""
is_multiclass = False # used for testing only
# Usually, raw_prediction may be any real number and y_pred is an open
# interval.
# interval_raw_prediction = Interval(-np.inf, np.inf, False, False)
interval_y_pred = Interval(-np.inf, np.inf, False, False)
@abstractmethod
def link(self, y_pred, out=None):
"""Compute the link function g(y_pred).
The link function maps (predicted) target values to raw predictions,
i.e. `g(y_pred) = raw_prediction`.
Parameters
----------
y_pred : array
Predicted target values.
out : array
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned.
Returns
-------
out : array
Output array, element-wise link function.
"""
@abstractmethod
def inverse(self, raw_prediction, out=None):
"""Compute the inverse link function h(raw_prediction).
The inverse link function maps raw predictions to predicted target
values, i.e. `h(raw_prediction) = y_pred`.
Parameters
----------
raw_prediction : array
Raw prediction values (in link space).
out : array
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned.
Returns
-------
out : array
Output array, element-wise inverse link function.
"""
| BaseLink |
python | walkccc__LeetCode | solutions/2291. Maximum Profit From Trading Stocks/2291-2.py | {
"start": 0,
"end": 381
} | class ____:
def maximumProfit(
self,
present: list[int],
future: list[int],
budget: int,
) -> int:
# dp[i] := the maximum profit of buying present so far with i budget
dp = [0] * (budget + 1)
for p, f in zip(present, future):
for j in range(budget, p - 1, -1):
dp[j] = max(dp[j], f - p + dp[j - p])
return dp[budget]
| Solution |
python | bokeh__bokeh | src/bokeh/models/transforms.py | {
"start": 6047,
"end": 7975
} | class ____(Transform):
''' Base class for interpolator transforms.
Interpolators return the value of a function which has been evaluated
between specified (x, y) pairs of data. As an example, if two control
point pairs were provided to the interpolator, a linear interpolaction
at a specific value of 'x' would result in the value of 'y' which existed
on the line connecting the two control points.
The control point pairs for the interpolators can be specified through either
* A literal sequence of values:
.. code-block:: python
interp = Interpolator(x=[1, 2, 3, 4, 5], y=[2, 5, 10, 12, 16])
* or a pair of columns defined in a ``ColumnDataSource`` object:
.. code-block:: python
interp = Interpolator(x="year", y="earnings", data=jewlery_prices))
This is the base class and is not intended to end use. Please see the
documentation for the final derived classes (``Jitter``, ``LineraInterpolator``,
``StepInterpolator``) for more information on their specific methods of
interpolation.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
x = Required(Either(String, Seq(Float)), help="""
Independent coordinate denoting the location of a point.
""")
y = Required(Either(String, Seq(Float)), help="""
Dependent coordinate denoting the value of a point at a location.
""")
data = Nullable(Instance(ColumnarDataSource), help="""
Data which defines the source for the named columns if a string is passed to either the ``x`` or ``y`` parameters.
""")
clip = Bool(True, help="""
Determine if the interpolation should clip the result to include only values inside its predefined range.
If this is set to False, it will return the most value of the closest point.
""")
| Interpolator |
python | cherrypy__cherrypy | cherrypy/_cptree.py | {
"start": 5993,
"end": 11047
} | class ____(object):
"""A registry of CherryPy applications, mounted at diverse points.
An instance of this class may also be used as a WSGI callable (WSGI
application object), in which case it dispatches to all mounted
apps.
"""
apps = {}
"""
A dict of the form {script name: application}, where "script name"
is a string declaring the URI mount point (no trailing slash), and
"application" is an instance of cherrypy.Application (or an arbitrary
WSGI callable if you happen to be using a WSGI server)."""
def __init__(self):
"""Initialize registry Tree."""
self.apps = {}
def mount(self, root, script_name='', config=None):
"""Mount a new app from a root object, script_name, and config.
root
An instance of a "controller class" (a collection of page
handler methods) which represents the root of the application.
This may also be an Application instance, or None if using
a dispatcher other than the default.
script_name
A string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the
URL at which to mount the given root. For example, if root.index()
will handle requests to "http://www.example.com:8080/dept/app1/",
then the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the
root of the URI, it MUST be an empty string (not "/").
config
A file or dict containing application config.
"""
if script_name is None:
raise TypeError(
"The 'script_name' argument may not be None. Application "
'objects may, however, possess a script_name of None (in '
'order to inpect the WSGI environ for SCRIPT_NAME upon each '
'request). You cannot mount such Applications on this Tree; '
'you must pass them to a WSGI server interface directly.',
)
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip('/')
if isinstance(root, Application):
app = root
if script_name != '' and script_name != app.script_name:
raise ValueError(
'Cannot specify a different script name and pass an '
'Application instance to cherrypy.mount',
)
script_name = app.script_name
else:
app = Application(root, script_name)
# If mounted at "", add favicon.ico
needs_favicon = (
script_name == ''
and root is not None
and not hasattr(root, 'favicon_ico')
)
if needs_favicon:
favicon = os.path.join(
os.getcwd(),
os.path.dirname(__file__),
'favicon.ico',
)
root.favicon_ico = tools.staticfile.handler(favicon)
if config:
app.merge(config)
self.apps[script_name] = app
return app
def graft(self, wsgi_callable, script_name=''):
"""Mount a wsgi callable at the given script_name."""
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip('/')
self.apps[script_name] = wsgi_callable
def script_name(self, path=None):
"""Return the script_name of the app at the given path, or None.
If path is None, cherrypy.request is used.
"""
if path is None:
try:
request = cherrypy.serving.request
path = httputil.urljoin(request.script_name, request.path_info)
except AttributeError:
return None
while True:
if path in self.apps:
return path
if path == '':
return None
# Move one node up the tree and try again.
path = path[: path.rfind('/')]
def __call__(self, environ, start_response):
"""Pre-initialize WSGI env and call WSGI-callable."""
# If you're calling this, then you're probably setting SCRIPT_NAME
# to '' (some WSGI servers always set SCRIPT_NAME to '').
# Try to look up the app using the full path.
env1x = environ
path = httputil.urljoin(
env1x.get('SCRIPT_NAME', ''),
env1x.get('PATH_INFO', ''),
)
sn = self.script_name(path or '/')
if sn is None:
start_response('404 Not Found', [])
return []
app = self.apps[sn]
# Correct the SCRIPT_NAME and PATH_INFO environ entries.
environ = environ.copy()
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip('/')) :]
return app(environ, start_response)
| Tree |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-linnworks/source_linnworks/streams.py | {
"start": 9039,
"end": 10508
} | class ____(HttpSubStream, IncrementalLinnworksStream):
# https://apps.linnworks.net/Api/Method/Orders-GetOrdersById
# Response: List<OrderDetails> https://apps.linnworks.net/Api/Class/linnworks-spa-commondata-OrderManagement-ClassBase-OrderDetails
# Allows 250 calls per minute
primary_key = "NumOrderId"
cursor_field = "ProcessedDateTime"
page_size = 100
def __init__(self, **kwargs):
super().__init__(ProcessedOrders(**kwargs), **kwargs)
def path(self, **kwargs) -> str:
return "/api/Orders/GetOrdersById"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
parent_stream_state = None
if stream_state:
parent_stream_state = {"dProcessedOn": stream_state["ProcessedDateTime"]}
buffer = []
for slice in HttpSubStream.stream_slices(self, stream_state=parent_stream_state, **kwargs):
buffer.append(slice["parent"]["pkOrderID"])
if len(buffer) == self.page_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def request_body_data(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {
"pkOrderIds": json.dumps(stream_slice, separators=(",", ":")),
}
| ProcessedOrderDetails |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_api.py | {
"start": 4401,
"end": 4981
} | class ____(
_DynamicAttributesType,
# Inspectable is used only by the mypy plugin
inspection.Inspectable[Mapper[Any]],
):
"""Metaclass that may be used in conjunction with the
:class:`_orm.DeclarativeBase` class to support addition of class
attributes dynamically.
"""
@compat_typing.dataclass_transform(
field_specifiers=(
MappedColumn,
RelationshipProperty,
Composite,
Synonym,
mapped_column,
relationship,
composite,
synonym,
deferred,
),
)
| DeclarativeAttributeIntercept |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 23573,
"end": 23710
} | class ____:
pass
# TODO: the names used across CHAR / VARCHAR / NCHAR / NVARCHAR
# here are inconsistent and not very good
| _LOBDataType |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-reka/llama_index/llms/reka/base.py | {
"start": 1882,
"end": 18874
} | class ____(CustomLLM):
"""Reka LLM integration for LlamaIndex."""
model: str = Field(default=DEFAULT_REKA_MODEL, description="The Reka model to use.")
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use for sampling.",
gte=0.0,
lte=1.0,
)
max_tokens: int = Field(
default=DEFAULT_REKA_MAX_TOKENS,
description="The maximum number of tokens to generate.",
gt=0,
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional keyword arguments for Reka API calls.",
)
_client: Reka = PrivateAttr()
_aclient: AsyncReka = PrivateAttr()
def __init__(
self,
model: str = DEFAULT_REKA_MODEL,
api_key: Optional[str] = None,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_REKA_MAX_TOKENS,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""
Initialize the RekaLLM instance.
Args:
model (str): The Reka model to use, choose from ['reka-flash', 'reka-core', 'reka-edge'].
api_key (Optional[str]): The API key for Reka.
temperature (float): The temperature to use for sampling.
max_tokens (int): The maximum number of tokens to generate.
additional_kwargs (Optional[Dict[str, Any]]): Additional keyword arguments for Reka API calls.
callback_manager (Optional[CallbackManager]): A callback manager for handling callbacks.
Raises:
ValueError: If the Reka API key is not provided and not set in the environment.
Example:
>>> reka_llm = RekaLLM(
... model="reka-flash",
... api_key="your-api-key-here",
... temperature=0.7,
... max_tokens=100
... )
"""
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = api_key or os.getenv("REKA_API_KEY")
if not api_key:
raise ValueError(
"Reka API key is required. Please provide it as an argument or set the REKA_API_KEY environment variable."
)
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
)
self._client = Reka(api_key=api_key)
self._aclient = AsyncReka(api_key=api_key)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=DEFAULT_REKA_CONTEXT_WINDOW,
num_output=self.max_tokens,
model_name=self.model,
is_chat_model=True,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
}
return {**base_kwargs, **self.additional_kwargs}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {**self._model_kwargs, **kwargs}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
"""
Send a chat request to the Reka API.
Args:
messages (Sequence[ChatMessage]): A sequence of chat messages.
**kwargs: Additional keyword arguments for the API call.
Returns:
ChatResponse: The response from the Reka API.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> reka_llm = RekaLLM(api_key="your-api-key-here")
>>> messages = [
... ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
... ChatMessage(role=MessageRole.USER, content="What's the capital of France?")
... ]
>>> response = reka_llm.chat(messages)
>>> print(response.message.content)
"""
all_kwargs = self._get_all_kwargs(**kwargs)
reka_messages = process_messages_for_reka(messages)
try:
response = self._client.chat.create(messages=reka_messages, **all_kwargs)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=response.responses[0].message.content,
),
raw=response.__dict__,
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
"""
Send a completion request to the Reka API.
Args:
prompt (str): The prompt for completion.
**kwargs: Additional keyword arguments for the API call.
Returns:
CompletionResponse: The response from the Reka API.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> reka_llm = RekaLLM(api_key="your-api-key-here")
>>> response = reka_llm.complete("The capital of France is")
>>> print(response.text)
"""
all_kwargs = self._get_all_kwargs(**kwargs)
try:
response = self._client.chat.create(
messages=[{"role": "user", "content": prompt}], **all_kwargs
)
return CompletionResponse(
text=response.responses[0].message.content,
raw=response.__dict__,
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
"""
Send a streaming chat request to the Reka API.
Args:
messages (Sequence[ChatMessage]): A sequence of chat messages.
**kwargs: Additional keyword arguments for the API call.
Returns:
ChatResponseGen: A generator yielding chat responses.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> reka_llm = RekaLLM(api_key="your-api-key-here")
>>> messages = [
... ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
... ChatMessage(role=MessageRole.USER, content="Tell me a short story.")
... ]
>>> for chunk in reka_llm.stream_chat(messages):
... print(chunk.delta, end="", flush=True)
"""
all_kwargs = self._get_all_kwargs(**kwargs)
reka_messages = process_messages_for_reka(messages)
try:
stream = self._client.chat.create_stream(
messages=reka_messages, **all_kwargs
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
def gen() -> ChatResponseGen:
prev_content = ""
for chunk in stream:
content = chunk.responses[0].chunk.content
content_delta = content[len(prev_content) :]
prev_content = content
yield ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=content,
),
delta=content_delta,
raw=chunk.__dict__,
)
return gen()
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
"""
Send a streaming completion request to the Reka API.
Args:
prompt (str): The prompt for completion.
**kwargs: Additional keyword arguments for the API call.
Returns:
CompletionResponseGen: A generator yielding completion responses.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> reka_llm = RekaLLM(api_key="your-api-key-here")
>>> prompt = "Write a haiku about programming:"
>>> for chunk in reka_llm.stream_complete(prompt):
... print(chunk.delta, end="", flush=True)
"""
all_kwargs = self._get_all_kwargs(**kwargs)
try:
stream = self._client.chat.create_stream(
messages=[{"role": "user", "content": prompt}], **all_kwargs
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
def gen() -> CompletionResponseGen:
prev_text = ""
for chunk in stream:
text = chunk.responses[0].chunk.content
text_delta = text[len(prev_text) :]
prev_text = text
yield CompletionResponse(
text=text,
delta=text_delta,
raw=chunk.__dict__,
)
return gen()
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
"""
Send an asynchronous chat request to the Reka API.
Args:
messages (Sequence[ChatMessage]): A sequence of chat messages.
**kwargs: Additional keyword arguments for the API call.
Returns:
ChatResponse: The response from the Reka API.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> import asyncio
>>> from llama_index.llms.reka import RekaLLM
>>> from llama_index.core.base.llms.types import ChatMessage, MessageRole
>>>
>>> async def main():
... reka_llm = RekaLLM(api_key="your-api-key-here")
... messages = [
... ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
... ChatMessage(role=MessageRole.USER, content="What's the meaning of life?")
... ]
... response = await reka_llm.achat(messages)
... print(response.message.content)
>>>
>>> asyncio.run(main())
"""
all_kwargs = self._get_all_kwargs(**kwargs)
reka_messages = process_messages_for_reka(messages)
try:
response = await self._aclient.chat.create(
messages=reka_messages, **all_kwargs
)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=response.responses[0].message.content,
),
raw=response.__dict__,
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
@llm_completion_callback()
async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
"""
Send an asynchronous completion request to the Reka API.
Args:
prompt (str): The prompt for completion.
**kwargs: Additional keyword arguments for the API call.
Returns:
CompletionResponse: The response from the Reka API.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> import asyncio
>>> from llama_index.llms.reka import RekaLLM
>>>
>>> async def main():
... reka_llm = RekaLLM(api_key="your-api-key-here")
... prompt = "The capital of France is"
... response = await reka_llm.acomplete(prompt)
... print(response.text)
>>>
>>> asyncio.run(main())
"""
all_kwargs = self._get_all_kwargs(**kwargs)
try:
response = await self._aclient.chat.create(
messages=[{"role": "user", "content": prompt}], **all_kwargs
)
return CompletionResponse(
text=response.responses[0].message.content,
raw=response.__dict__,
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
"""
Send an asynchronous streaming chat request to the Reka API.
Args:
messages (Sequence[ChatMessage]): A sequence of chat messages.
**kwargs: Additional keyword arguments for the API call.
Returns:
ChatResponseAsyncGen: An asynchronous generator yielding chat responses.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> import asyncio
>>> from llama_index.llms.reka import RekaLLM
>>> from llama_index.core.base.llms.types import ChatMessage, MessageRole
>>>
>>> async def main():
... reka_llm = RekaLLM(api_key="your-api-key-here")
... messages = [
... ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
... ChatMessage(role=MessageRole.USER, content="Tell me a short story about a robot.")
... ]
... async for chunk in await reka_llm.astream_chat(messages):
... print(chunk.delta, end="", flush=True)
... print() # New line after the story is complete
>>>
>>> asyncio.run(main())
"""
all_kwargs = self._get_all_kwargs(**kwargs)
reka_messages = process_messages_for_reka(messages)
try:
stream = self._aclient.chat.create_stream(
messages=reka_messages, **all_kwargs
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
async def gen() -> ChatResponseAsyncGen:
prev_content = ""
async for chunk in stream:
content = chunk.responses[0].chunk.content
content_delta = content[len(prev_content) :]
prev_content = content
yield ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=content,
),
delta=content_delta,
raw=chunk.__dict__,
)
return gen()
@llm_completion_callback()
async def astream_complete(
self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
"""
Send an asynchronous streaming completion request to the Reka API.
Args:
prompt (str): The prompt for completion.
**kwargs: Additional keyword arguments for the API call.
Returns:
CompletionResponseAsyncGen: An asynchronous generator yielding completion responses.
Raises:
ValueError: If there's an error with the Reka API call.
Example:
>>> import asyncio
>>> from llama_index.llms.reka import RekaLLM
>>>
>>> async def main():
... reka_llm = RekaLLM(api_key="your-api-key-here")
... prompt = "Write a haiku about artificial intelligence:"
... async for chunk in await reka_llm.astream_complete(prompt):
... print(chunk.delta, end="", flush=True)
... print() # New line after the haiku is complete
>>>
>>> asyncio.run(main())
"""
all_kwargs = self._get_all_kwargs(**kwargs)
try:
stream = self._aclient.chat.create_stream(
messages=[{"role": "user", "content": prompt}], **all_kwargs
)
except ApiError as e:
raise ValueError(f"Reka API error: {e.status_code} - {e.body}")
async def gen() -> CompletionResponseAsyncGen:
prev_text = ""
async for chunk in stream:
text = chunk.responses[0].chunk.content
text_delta = text[len(prev_text) :]
prev_text = text
yield CompletionResponse(
text=text,
delta=text_delta,
raw=chunk.__dict__,
)
return gen()
| RekaLLM |
python | allegroai__clearml | clearml/backend_api/services/v2_13/auth.py | {
"start": 17253,
"end": 18714
} | class ____(Response):
"""
Response of auth.revoke_credentials endpoint.
:param revoked: Number of credentials revoked
:type revoked: int
"""
_service = "auth"
_action = "revoke_credentials"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"revoked": {
"description": "Number of credentials revoked",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, revoked: Optional[int] = None, **kwargs: Any) -> None:
super(RevokeCredentialsResponse, self).__init__(**kwargs)
self.revoked = revoked
@schema_property("revoked")
def revoked(self) -> Optional[int]:
return self._property_revoked
@revoked.setter
def revoked(self, value: Optional[int]) -> None:
if value is None:
self._property_revoked = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "revoked", six.integer_types)
self._property_revoked = value
response_mapping = {
LoginRequest: LoginResponse,
CreateCredentialsRequest: CreateCredentialsResponse,
GetCredentialsRequest: GetCredentialsResponse,
RevokeCredentialsRequest: RevokeCredentialsResponse,
EditUserRequest: EditUserResponse,
}
| RevokeCredentialsResponse |
python | django__django | tests/requests_tests/tests.py | {
"start": 51505,
"end": 53768
} | class ____(SimpleTestCase):
ENVIRON = {
# Non-headers are ignored.
"PATH_INFO": "/somepath/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
# These non-HTTP prefixed headers are included.
"CONTENT_TYPE": "text/html",
"CONTENT_LENGTH": "100",
# All HTTP-prefixed headers are included.
"HTTP_ACCEPT": "*",
"HTTP_HOST": "example.com",
"HTTP_USER_AGENT": "python-requests/1.2.0",
}
def test_base_request_headers(self):
request = HttpRequest()
request.META = self.ENVIRON
self.assertEqual(
dict(request.headers),
{
"Content-Type": "text/html",
"Content-Length": "100",
"Accept": "*",
"Host": "example.com",
"User-Agent": "python-requests/1.2.0",
},
)
def test_wsgi_request_headers(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(
dict(request.headers),
{
"Content-Type": "text/html",
"Content-Length": "100",
"Accept": "*",
"Host": "example.com",
"User-Agent": "python-requests/1.2.0",
},
)
def test_wsgi_request_headers_getitem(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(request.headers["User-Agent"], "python-requests/1.2.0")
self.assertEqual(request.headers["user-agent"], "python-requests/1.2.0")
self.assertEqual(request.headers["user_agent"], "python-requests/1.2.0")
self.assertEqual(request.headers["Content-Type"], "text/html")
self.assertEqual(request.headers["Content-Length"], "100")
def test_wsgi_request_headers_get(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(request.headers.get("User-Agent"), "python-requests/1.2.0")
self.assertEqual(request.headers.get("user-agent"), "python-requests/1.2.0")
self.assertEqual(request.headers.get("Content-Type"), "text/html")
self.assertEqual(request.headers.get("Content-Length"), "100")
| RequestHeadersTests |
python | PyCQA__pylint | doc/data/messages/i/invalid-index-returned/bad.py | {
"start": 0,
"end": 136
} | class ____:
"""__index__ returns a dict"""
def __index__(self): # [invalid-index-returned]
return {"19": "19"}
| CustomIndex |
python | django__django | tests/test_runner/test_discover_runner.py | {
"start": 30809,
"end": 33218
} | class ____(SimpleTestCase):
runner = DiscoverRunner(verbosity=2)
skip_msg = "Skipping setup of unused database(s): "
def get_databases(self, test_labels):
with captured_stdout() as stdout:
suite = self.runner.build_suite(test_labels)
databases = self.runner.get_databases(suite)
return databases, stdout.getvalue()
def assertSkippedDatabases(self, test_labels, expected_databases):
databases, output = self.get_databases(test_labels)
self.assertEqual(databases, expected_databases)
skipped_databases = set(connections) - set(expected_databases)
if skipped_databases:
self.assertIn(self.skip_msg + ", ".join(sorted(skipped_databases)), output)
else:
self.assertNotIn(self.skip_msg, output)
def test_mixed(self):
databases, output = self.get_databases(["test_runner_apps.databases.tests"])
self.assertEqual(databases, {"default": True, "other": False})
self.assertNotIn(self.skip_msg, output)
def test_all(self):
databases, output = self.get_databases(
["test_runner_apps.databases.tests.AllDatabasesTests"]
)
self.assertEqual(databases, {alias: False for alias in connections})
self.assertNotIn(self.skip_msg, output)
def test_default_and_other(self):
self.assertSkippedDatabases(
[
"test_runner_apps.databases.tests.DefaultDatabaseTests",
"test_runner_apps.databases.tests.OtherDatabaseTests",
],
{"default": False, "other": False},
)
def test_default_only(self):
self.assertSkippedDatabases(
[
"test_runner_apps.databases.tests.DefaultDatabaseTests",
],
{"default": False},
)
def test_other_only(self):
self.assertSkippedDatabases(
["test_runner_apps.databases.tests.OtherDatabaseTests"], {"other": False}
)
def test_no_databases_required(self):
self.assertSkippedDatabases(
["test_runner_apps.databases.tests.NoDatabaseTests"], {}
)
def test_serialize(self):
databases, _ = self.get_databases(
["test_runner_apps.databases.tests.DefaultDatabaseSerializedTests"]
)
self.assertEqual(databases, {"default": True})
| DiscoverRunnerGetDatabasesTests |
python | keras-team__keras | keras/src/backend/torch/optimizers/torch_adadelta.py | {
"start": 147,
"end": 1672
} | class ____(
torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adadelta
):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
dtype = variables[0].dtype
lr = ops.cast(learning_rate, dtype)
rho = self.rho
accumulated_grads = [
self._accumulated_grads[self._get_variable_index(variable)].value
for variable in keras_variables
]
accumulated_delta_vars = [
self._accumulated_delta_vars[
self._get_variable_index(variable)
].value
for variable in keras_variables
]
torch._foreach_mul_(accumulated_grads, rho)
torch._foreach_add_(
accumulated_grads, torch._foreach_mul(grads, grads), alpha=1 - rho
)
def rms(x):
return torch._foreach_sqrt(torch._foreach_add(x, self.epsilon))
delta_vars = torch._foreach_mul(
torch._foreach_div(
torch._foreach_mul(rms(accumulated_delta_vars), grads),
rms(accumulated_grads),
),
-1,
)
torch._foreach_mul_(accumulated_delta_vars, rho)
torch._foreach_add_(
accumulated_delta_vars,
torch._foreach_mul(delta_vars, delta_vars),
alpha=1 - rho,
)
torch._foreach_add_(variables, delta_vars, alpha=lr)
| Adadelta |
python | django-guardian__django-guardian | guardian/testapp/tests/test_custompkmodel.py | {
"start": 198,
"end": 1107
} | class ____(TestCase):
"""
Tests against custom model with primary key other than *standard*
``id`` integer field.
"""
def setUp(self):
self.user = get_user_model().objects.create(username="joe")
self.ctype = ContentType.objects.create(model="bar", app_label="fake-for-guardian-tests")
def test_assign_perm(self):
assign_perm("contenttypes.change_contenttype", self.user, self.ctype)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype", self.ctype))
def test_remove_perm(self):
assign_perm("contenttypes.change_contenttype", self.user, self.ctype)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype", self.ctype))
remove_perm("contenttypes.change_contenttype", self.user, self.ctype)
self.assertFalse(self.user.has_perm("contenttypes.change_contenttype", self.ctype))
| CustomPKModelTest |
python | run-llama__llama_index | llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-redis/llama_index/storage/kvstore/redis/base.py | {
"start": 258,
"end": 6815
} | class ____(BaseKVStore):
"""
Redis KV Store.
Args:
redis_uri (str): Redis URI
redis_client (Any): Redis client
async_redis_client (Any): Async Redis client
Raises:
ValueError: If redis-py is not installed
Examples:
>>> from llama_index.storage.kvstore.redis import RedisKVStore
>>> # Create a RedisKVStore
>>> redis_kv_store = RedisKVStore(
>>> redis_url="redis://127.0.0.1:6379")
"""
def __init__(
self,
redis_uri: Optional[str] = "redis://127.0.0.1:6379",
redis_client: Optional[Redis] = None,
async_redis_client: Optional[AsyncRedis] = None,
**kwargs: Any,
) -> None:
# user could inject customized redis client.
# for instance, redis have specific TLS connection, etc.
if redis_client is not None:
self._redis_client = redis_client
# create async client from sync client
if async_redis_client is not None:
self._async_redis_client = async_redis_client
else:
try:
self._async_redis_client = AsyncRedis.from_url(
self._redis_client.connection_pool.connection_kwargs["url"]
)
except Exception:
print(
"Could not create async redis client from sync client, "
"pass in `async_redis_client` explicitly."
)
self._async_redis_client = None
elif redis_uri is not None:
# otherwise, try initializing redis client
try:
# connect to redis from url
self._redis_client = Redis.from_url(redis_uri, **kwargs)
self._async_redis_client = AsyncRedis.from_url(redis_uri, **kwargs)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
else:
raise ValueError("Either 'redis_client' or redis_url must be provided.")
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
self._redis_client.hset(name=collection, key=key, value=json.dumps(val))
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
await self._async_redis_client.hset(
name=collection, key=key, value=json.dumps(val)
)
def put_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""
Put a dictionary of key-value pairs into the store.
Args:
kv_pairs (List[Tuple[str, dict]]): key-value pairs
collection (str): collection name
"""
with self._redis_client.pipeline() as pipe:
cur_batch = 0
for key, val in kv_pairs:
pipe.hset(name=collection, key=key, value=json.dumps(val))
cur_batch += 1
if cur_batch >= batch_size:
cur_batch = 0
pipe.execute()
if cur_batch > 0:
pipe.execute()
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
val_str = self._redis_client.hget(name=collection, key=key)
if val_str is None:
return None
return json.loads(val_str)
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
val_str = await self._async_redis_client.hget(name=collection, key=key)
if val_str is None:
return None
return json.loads(val_str)
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
collection_kv_dict = {}
for key, val_str in self._redis_client.hscan_iter(name=collection):
value = dict(json.loads(val_str))
collection_kv_dict[key.decode()] = value
return collection_kv_dict
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
collection_kv_dict = {}
async for key, val_str in self._async_redis_client.hscan_iter(name=collection):
value = dict(json.loads(val_str))
collection_kv_dict[key.decode()] = value
return collection_kv_dict
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
deleted_num = self._redis_client.hdel(collection, key)
return bool(deleted_num > 0)
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
deleted_num = await self._async_redis_client.hdel(collection, key)
return bool(deleted_num > 0)
@classmethod
def from_host_and_port(
cls,
host: str,
port: int,
) -> "RedisKVStore":
"""
Load a RedisKVStore from a Redis host and port.
Args:
host (str): Redis host
port (int): Redis port
"""
url = f"redis://{host}:{port}".format(host=host, port=port)
return cls(redis_uri=url)
@classmethod
def from_redis_client(cls, redis_client: Any) -> "RedisKVStore":
"""
Load a RedisKVStore from a Redis Client.
Args:
redis_client (Redis): Redis client
"""
return cls(redis_client=redis_client)
| RedisKVStore |
python | dask__distributed | distributed/tests/test_profile.py | {
"start": 7542,
"end": 9408
} | class ____:
f_lasti: int
f_code: FakeCode
f_lineno: int | None = None
f_back: FakeFrame | None = None
f_globals: dict[str, object] = dataclasses.field(default_factory=dict)
@pytest.mark.parametrize(
"f_lasti,f_lineno",
[
(-1, 1),
(0, 2),
(1, 2),
(11, 2),
(12, 3),
(21, 4),
(22, 4),
(23, 4),
(24, 2),
(25, 2),
(26, 2),
(27, 2),
(100, 2),
],
)
def test_info_frame_f_lineno(f_lasti: int, f_lineno: int) -> None:
assert info_frame(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == { # type: ignore
"filename": "<stdin>",
"name": "example",
"line_number": f_lineno,
"line": "",
}
@pytest.mark.parametrize(
"f_lasti,f_lineno",
[
(-1, 1),
(0, 2),
(1, 2),
(11, 2),
(12, 3),
(21, 4),
(22, 4),
(23, 4),
(24, 2),
(25, 2),
(26, 2),
(27, 2),
(100, 2),
],
)
def test_call_stack_f_lineno(f_lasti: int, f_lineno: int) -> None:
assert call_stack(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == [ # type: ignore
f' File "<stdin>", line {f_lineno}, in example\n\t'
]
def test_stack_overflow():
old = sys.getrecursionlimit()
sys.setrecursionlimit(300)
try:
state = create()
frame = None
def f(i):
if i == 0:
nonlocal frame
frame = sys._current_frames()[threading.get_ident()]
return
else:
return f(i - 1)
f(sys.getrecursionlimit() - 100)
process(frame, None, state)
assert state["children"]
assert state["count"]
assert merge(state, state, state)
finally:
sys.setrecursionlimit(old)
| FakeFrame |
python | getsentry__sentry-python | sentry_sdk/_metrics_batcher.py | {
"start": 331,
"end": 5042
} | class ____:
MAX_METRICS_BEFORE_FLUSH = 1000
MAX_METRICS_BEFORE_DROP = 10_000
FLUSH_WAIT_TIME = 5.0
def __init__(
self,
capture_func, # type: Callable[[Envelope], None]
record_lost_func, # type: Callable[..., None]
):
# type: (...) -> None
self._metric_buffer = [] # type: List[Metric]
self._capture_func = capture_func
self._record_lost_func = record_lost_func
self._running = True
self._lock = threading.Lock()
self._flush_event = threading.Event() # type: threading.Event
self._flusher = None # type: Optional[threading.Thread]
self._flusher_pid = None # type: Optional[int]
def _ensure_thread(self):
# type: (...) -> bool
if not self._running:
return False
pid = os.getpid()
if self._flusher_pid == pid:
return True
with self._lock:
if self._flusher_pid == pid:
return True
self._flusher_pid = pid
self._flusher = threading.Thread(target=self._flush_loop)
self._flusher.daemon = True
try:
self._flusher.start()
except RuntimeError:
self._running = False
return False
return True
def _flush_loop(self):
# type: (...) -> None
while self._running:
self._flush_event.wait(self.FLUSH_WAIT_TIME + random.random())
self._flush_event.clear()
self._flush()
def add(
self,
metric, # type: Metric
):
# type: (...) -> None
if not self._ensure_thread() or self._flusher is None:
return None
with self._lock:
if len(self._metric_buffer) >= self.MAX_METRICS_BEFORE_DROP:
self._record_lost_func(
reason="queue_overflow",
data_category="trace_metric",
quantity=1,
)
return None
self._metric_buffer.append(metric)
if len(self._metric_buffer) >= self.MAX_METRICS_BEFORE_FLUSH:
self._flush_event.set()
def kill(self):
# type: (...) -> None
if self._flusher is None:
return
self._running = False
self._flush_event.set()
self._flusher = None
def flush(self):
# type: (...) -> None
self._flush()
@staticmethod
def _metric_to_transport_format(metric):
# type: (Metric) -> Any
def format_attribute(val):
# type: (Union[int, float, str, bool]) -> Any
if isinstance(val, bool):
return {"value": val, "type": "boolean"}
if isinstance(val, int):
return {"value": val, "type": "integer"}
if isinstance(val, float):
return {"value": val, "type": "double"}
if isinstance(val, str):
return {"value": val, "type": "string"}
return {"value": safe_repr(val), "type": "string"}
res = {
"timestamp": metric["timestamp"],
"trace_id": metric["trace_id"],
"name": metric["name"],
"type": metric["type"],
"value": metric["value"],
"attributes": {
k: format_attribute(v) for (k, v) in metric["attributes"].items()
},
}
if metric.get("span_id") is not None:
res["span_id"] = metric["span_id"]
if metric.get("unit") is not None:
res["unit"] = metric["unit"]
return res
def _flush(self):
# type: (...) -> Optional[Envelope]
envelope = Envelope(
headers={"sent_at": format_timestamp(datetime.now(timezone.utc))}
)
with self._lock:
if len(self._metric_buffer) == 0:
return None
envelope.add_item(
Item(
type="trace_metric",
content_type="application/vnd.sentry.items.trace-metric+json",
headers={
"item_count": len(self._metric_buffer),
},
payload=PayloadRef(
json={
"items": [
self._metric_to_transport_format(metric)
for metric in self._metric_buffer
]
}
),
)
)
self._metric_buffer.clear()
self._capture_func(envelope)
return envelope
| MetricsBatcher |
python | milvus-io__pymilvus | pymilvus/client/abstract.py | {
"start": 16120,
"end": 17116
} | class ____:
def __init__(self):
self.__index = 0
def __iter__(self):
return self
def __getitem__(self, item: Any):
if isinstance(item, slice):
_start = item.start or 0
_end = min(item.stop, self.__len__()) if item.stop else self.__len__()
_step = item.step or 1
return [self.get__item(i) for i in range(_start, _end, _step)]
if item >= self.__len__():
msg = "Index out of range"
raise IndexError(msg)
return self.get__item(item)
def __next__(self):
while self.__index < self.__len__():
self.__index += 1
return self.__getitem__(self.__index - 1)
# iterate stop, raise Exception
self.__index = 0
raise StopIteration
def __str__(self):
return str(list(map(str, self.__getitem__(slice(0, 10)))))
@abc.abstractmethod
def get__item(self, item: Any):
raise NotImplementedError
| LoopBase |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 280033,
"end": 281521
} | class ____(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
i_s = _ShapeInfo("s", False, (0, np.inf), (False, False))
return [ic, i_s]
def _pdf(self, x, c, s):
return np.exp(self._logpdf(x, c, s))
def _logpdf(self, x, c, s):
return (np.log(c) - np.log(x) - np.log(s) +
_norm_logpdf(np.log(x) / s) +
_norm_logcdf(-np.log(x) / s) * (c - 1.))
def _cdf(self, x, c, s):
return -sc.expm1(self._logsf(x, c, s))
def _ppf(self, q, c, s):
return self._isf(1 - q, c, s)
def _sf(self, x, c, s):
return np.exp(self._logsf(x, c, s))
def _logsf(self, x, c, s):
return _norm_logcdf(-np.log(x) / s) * c
def _isf(self, q, c, s):
return np.exp(-_norm_ppf(q**(1/c)) * s)
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
| powerlognorm_gen |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/_adapters.py | {
"start": 80,
"end": 1862
} | class ____(email.message.Message):
multiple_use_keys = set(
map(
FoldedCase,
[
'Classifier',
'Obsoletes-Dist',
'Platform',
'Project-URL',
'Provides-Dist',
'Provides-Extra',
'Requires-Dist',
'Requires-External',
'Supported-Platform',
'Dynamic',
],
)
)
"""
Keys that may be indicated multiple times per PEP 566.
"""
def __new__(cls, orig: email.message.Message):
res = super().__new__(cls)
vars(res).update(vars(orig))
return res
def __init__(self, *args, **kwargs):
self._headers = self._repair_headers()
# suppress spurious error from mypy
def __iter__(self):
return super().__iter__()
def _repair_headers(self):
def redent(value):
"Correct for RFC822 indentation"
if not value or '\n' not in value:
return value
return textwrap.dedent(' ' * 8 + value)
headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
if self._payload:
headers.append(('Description', self.get_payload()))
return headers
@property
def json(self):
"""
Convert PackageMetadata to a JSON-compatible format
per PEP 0566.
"""
def transform(key):
value = self.get_all(key) if key in self.multiple_use_keys else self[key]
if key == 'Keywords':
value = re.split(r'\s+', value)
tk = key.lower().replace('-', '_')
return tk, value
return dict(map(transform, map(FoldedCase, self)))
| Message |
python | pydata__xarray | asv_bench/benchmarks/unstacking.py | {
"start": 805,
"end": 1863
} | class ____(Unstacking):
def setup(self, *args, **kwargs):
requires_sparse()
import sparse
data = sparse.random((500, 1000), random_state=0, fill_value=0)
self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...])
self.da_missing = self.da_full[:-1]
mindex = pd.MultiIndex.from_arrays([np.arange(100), np.arange(100)])
self.da_eye_2d = xr.DataArray(np.ones((100,)), dims="z", coords={"z": mindex})
self.da_eye_3d = xr.DataArray(
np.ones((100, 50)),
dims=("z", "foo"),
coords={"z": mindex, "foo": np.arange(50)},
)
def time_unstack_to_sparse_2d(self):
self.da_eye_2d.unstack(sparse=True)
def time_unstack_to_sparse_3d(self):
self.da_eye_3d.unstack(sparse=True)
def peakmem_unstack_to_sparse_2d(self):
self.da_eye_2d.unstack(sparse=True)
def peakmem_unstack_to_sparse_3d(self):
self.da_eye_3d.unstack(sparse=True)
def time_unstack_pandas_slow(self):
pass
| UnstackingSparse |
python | joke2k__faker | faker/providers/company/hr_HR/__init__.py | {
"start": 45,
"end": 313
} | class ____(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{last_name}}",
)
company_suffixes = (
"d.o.o.",
"d.d.",
"j.d.o.o.",
)
| Provider |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 40216,
"end": 41462
} | class ____:
"""Describes a barrier reference.
Attributes:
num_arrivals: The number of arrivals that will be recorded by this barrier.
num_barriers: The number of barriers that will be created. Individual
barriers can be accessed by indexing into the barrier Ref.
orders_tensor_core: If False, a successfull wait from one thread does not
guarantee that the TensorCore-related operations in other threads have
completed. Similarly, when False any TensorCore operation in the waiting
thread is allowed to begin before the wait succeeds.
"""
num_arrivals: int = 1
num_barriers: int = 1
orders_tensor_core: bool = False
def get_array_aval(self) -> jax_core.ShapedArray:
raise ValueError("Barriers are not arrays")
def get_ref_aval(self) -> state.AbstractRef:
aval = jax_core.ShapedArray(
[self.num_barriers],
BarrierType(
self.num_arrivals, orders_tensor_core=self.orders_tensor_core
),
)
return state.AbstractRef(aval, SMEM)
def __post_init__(self):
if self.num_arrivals < 1:
raise ValueError(
f"Num arrivals must be at least 1, but got {self.num_arrivals}"
)
@dataclasses.dataclass(frozen=True, kw_only=True)
| Barrier |
python | plotly__plotly.py | plotly/graph_objs/scatter/_stream.py | {
"start": 233,
"end": 3511
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter"
_path_str = "scatter.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | huggingface__transformers | src/transformers/models/plbart/modeling_plbart.py | {
"start": 25195,
"end": 37432
} | class ____(PLBartPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PLBartDecoderLayer`]
Args:
config: PLBartConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PLBartConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = PLBartScaledWordEmbedding(
config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
)
self.embed_positions = PLBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([PLBartDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# retrieve input_ids and inputs_embeds
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input)
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
batch_size, seq_length = inputs_embeds.size()[:-1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
self_attn_cache = (
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values
)
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=self_attn_cache,
)
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
# embed positions
positions = self.embed_positions(input, past_key_values_length, position_ids=cache_position)
positions = positions.to(inputs_embeds.device)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int):
"""
Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that PLBart does not
have a single `decoder_start_token_id` in contrast to other Bart-like models.
"""
prev_output_tokens = input_ids.clone()
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)
index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
prev_output_tokens[:, 0] = decoder_start_tokens
return prev_output_tokens
@auto_docstring
| PLBartDecoder |
python | dagster-io__dagster | python_modules/dagster/dagster/components/core/context.py | {
"start": 10202,
"end": 16001
} | class ____(ComponentDeclLoadContext):
"""Context object that provides environment and path information during component loading.
This context is automatically created and passed to component definitions when loading
a project's defs folder. Each Python module or folder in the defs directory receives
a unique context instance that provides access to the underlying ComponentDecl,
project structure, paths, and utilities for dynamic component instantiation.
The context enables components to:
- Access project and module path information
- Load other modules and definitions within the project
- Resolve relative imports and module names
- Access templating and resolution capabilities
Args:
path: The filesystem path of the component currently being loaded.
For a file: ``/path/to/project/src/project/defs/my_component.py``
For a directory: ``/path/to/project/src/project/defs/my_component/``
project_root: The root directory of the Dagster project, typically containing
``pyproject.toml`` or ``setup.py``. Example: ``/path/to/project``
defs_module_path: The filesystem path to the root defs folder.
Example: ``/path/to/project/src/project/defs``
defs_module_name: The Python module name for the root defs folder, used for
import resolution. Typically follows the pattern ``"project_name.defs"``.
Example: ``"my_project.defs"``
resolution_context: The resolution context used by the component templating
system for parameter resolution and variable substitution.
component_tree: The component tree that contains the component currently being loaded.
terminate_autoloading_on_keyword_files: Controls whether autoloading stops
when encountering ``definitions.py`` or ``component.py`` files.
**Deprecated**: This parameter will be removed after version 1.11.
component_decl: The associated ComponentDecl to the component being loaded.
Note:
This context is automatically provided by Dagster's autoloading system and
should not be instantiated manually in most cases. For testing purposes,
use ``ComponentTree.for_test().load_context`` to create a test instance.
See Also:
- :py:func:`dagster.definitions`: Decorator that receives this context
- :py:class:`dagster.Definitions`: The object typically returned by context-using functions
- :py:class:`dagster.components.resolved.context.ResolutionContext`: Underlying resolution context
- :py:class:`dagster.ComponentDeclLoadContext`: Context available when loading ComponentDecls
"""
component_decl: "ComponentDecl"
@staticmethod
def from_decl_load_context(
decl_load_context: ComponentDeclLoadContext,
component_decl: "ComponentDecl",
) -> "ComponentLoadContext":
"""Augments a ComponentDeclLoadContext with the ComponentDecl being loaded.
Args:
decl_load_context: The ComponentDeclLoadContext to augment.
component_decl: The ComponentDecl being loaded.
Returns:
ComponentLoadContext: An augmented context which can be used to
load and build definitions for the component.
"""
return ComponentLoadContext(
component_path=decl_load_context.component_path,
project_root=decl_load_context.project_root,
defs_module_path=decl_load_context.defs_module_path,
defs_module_name=decl_load_context.defs_module_name,
resolution_context=decl_load_context.resolution_context,
component_tree=decl_load_context.component_tree,
terminate_autoloading_on_keyword_files=decl_load_context.terminate_autoloading_on_keyword_files,
component_decl=component_decl,
)
def build_defs(self, defs_path: Union[Path, "ComponentPath"]) -> Definitions:
"""Builds definitions from the given defs subdirectory. Currently
does not incorporate postprocessing from parent defs modules.
Args:
defs_path: Path to the defs module to load. If relative, resolves relative to the defs root.
Returns:
Definitions: The definitions loaded from the given path.
"""
from dagster.components.core.defs_module import ComponentPath
resolved_path = ComponentPath.from_resolvable(self.defs_module_path, defs_path)
self.component_tree.mark_component_defs_dependency(
from_path=self.component_path, to_path=resolved_path
)
return self.component_tree.build_defs(resolved_path)
@deprecated(
breaking_version="1.13.0",
additional_warn_text="Use build_defs instead.",
)
def build_defs_at_path(self, defs_path: Union[Path, "ComponentPath"]) -> Definitions:
"""Builds definitions from the given defs subdirectory. Currently
does not incorporate postprocessing from parent defs modules.
Args:
defs_path: Path to the defs module to load. If relative, resolves relative to the defs root.
Returns:
Definitions: The definitions loaded from the given path.
"""
return self.build_defs(defs_path)
def for_path(self, path: Path) -> "Self":
"""Creates a new context for the given path.
Args:
path: The filesystem path to create a new context for.
Returns:
ComponentLoadContext: A new context for the given path.
"""
from dagster.components.core.defs_module import ComponentPath
component_path = ComponentPath.from_path(path=path)
return self.for_component_path(component_path)
| ComponentLoadContext |
python | tornadoweb__tornado | maint/test/mypy/good.py | {
"start": 65,
"end": 239
} | class ____(RequestHandler):
def get(self) -> None:
self.write("foo")
async def post(self) -> None:
await gen.sleep(1)
self.write("foo")
| MyHandler |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 23566,
"end": 24079
} | class ____(DeviceTypeTestBase):
device_type = "mps"
primary_device: ClassVar[str]
@classmethod
def get_primary_device(cls):
return cls.primary_device
@classmethod
def get_all_devices(cls):
# currently only one device is supported on MPS backend
prim_device = cls.get_primary_device()
return [prim_device]
@classmethod
def setUpClass(cls):
cls.primary_device = "mps:0"
def _should_stop_test_suite(self):
return False
| MPSTestBase |
python | plotly__plotly.py | plotly/graph_objs/layout/_xaxis.py | {
"start": 235,
"end": 134328
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.xaxis"
_valid_props = {
"anchor",
"automargin",
"autorange",
"autorangeoptions",
"autotickangles",
"autotypenumbers",
"calendar",
"categoryarray",
"categoryarraysrc",
"categoryorder",
"color",
"constrain",
"constraintoward",
"dividercolor",
"dividerwidth",
"domain",
"dtick",
"exponentformat",
"fixedrange",
"gridcolor",
"griddash",
"gridwidth",
"hoverformat",
"insiderange",
"labelalias",
"layer",
"linecolor",
"linewidth",
"matches",
"maxallowed",
"minallowed",
"minexponent",
"minor",
"minorloglabels",
"mirror",
"modebardisable",
"nticks",
"overlaying",
"position",
"range",
"rangebreakdefaults",
"rangebreaks",
"rangemode",
"rangeselector",
"rangeslider",
"scaleanchor",
"scaleratio",
"separatethousands",
"showdividers",
"showexponent",
"showgrid",
"showline",
"showspikes",
"showticklabels",
"showtickprefix",
"showticksuffix",
"side",
"spikecolor",
"spikedash",
"spikemode",
"spikesnap",
"spikethickness",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabelindex",
"ticklabelindexsrc",
"ticklabelmode",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelshift",
"ticklabelstandoff",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"tickson",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"type",
"uirevision",
"unifiedhovertitle",
"visible",
"zeroline",
"zerolinecolor",
"zerolinelayer",
"zerolinewidth",
}
@property
def anchor(self):
"""
If set to an opposite-letter axis id (e.g. `x2`, `y`), this
axis is bound to the corresponding opposite-letter axis. If set
to "free", this axis' position is determined by `position`.
The 'anchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['free']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?( domain)?$',
'^y([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["anchor"]
@anchor.setter
def anchor(self, val):
self["anchor"] = val
@property
def automargin(self):
"""
Determines whether long tick labels automatically grow the
figure margins.
The 'automargin' property is a flaglist and may be specified
as a string containing:
- Any combination of ['height', 'width', 'left', 'right', 'top', 'bottom'] joined with '+' characters
(e.g. 'height+width')
OR exactly one of [True, False] (e.g. 'False')
Returns
-------
Any
"""
return self["automargin"]
@automargin.setter
def automargin(self, val):
self["automargin"] = val
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided and it has a value for both the lower and
upper bound, `autorange` is set to False. Using "min" applies
autorange only to set the minimum. Using "max" applies
autorange only to set the maximum. Using *min reversed* applies
autorange only to set the minimum on a reversed axis. Using
*max reversed* applies autorange only to set the maximum on a
reversed axis. Using "reversed" applies autorange on both ends
and reverses the axis direction.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed', 'min reversed', 'max reversed',
'min', 'max']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
@property
def autorangeoptions(self):
"""
The 'autorangeoptions' property is an instance of Autorangeoptions
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.xaxis.Autorangeoptions`
- A dict of string/value properties that will be passed
to the Autorangeoptions constructor
Returns
-------
plotly.graph_objs.layout.xaxis.Autorangeoptions
"""
return self["autorangeoptions"]
@autorangeoptions.setter
def autorangeoptions(self, val):
self["autorangeoptions"] = val
@property
def autotickangles(self):
"""
When `tickangle` is set to "auto", it will be set to the first
angle in this array that is large enough to prevent label
overlap.
The 'autotickangles' property is an info array that may be specified as:
* a list of elements where:
The 'autotickangles[i]' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
list
"""
return self["autotickangles"]
@autotickangles.setter
def autotickangles(self, val):
self["autotickangles"] = val
@property
def autotypenumbers(self):
"""
Using "strict" a numeric string in trace data is not converted
to a number. Using *convert types* a numeric string in trace
data may be treated as a number during automatic axis `type`
detection. Defaults to layout.autotypenumbers.
The 'autotypenumbers' property is an enumeration that may be specified as:
- One of the following enumeration values:
['convert types', 'strict']
Returns
-------
Any
"""
return self["autotypenumbers"]
@autotypenumbers.setter
def autotypenumbers(self, val):
self["autotypenumbers"] = val
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis, that's specified in the trace or via the
global `layout.calendar`
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean, geometric mean or median
of all the values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'geometric mean ascending',
'geometric mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def constrain(self):
"""
If this axis needs to be compressed (either due to its own
`scaleanchor` and `scaleratio` or those of the other axis),
determines how that happens: by increasing the "range", or by
decreasing the "domain". Default is "domain" for axes
containing image traces, "range" otherwise.
The 'constrain' property is an enumeration that may be specified as:
- One of the following enumeration values:
['range', 'domain']
Returns
-------
Any
"""
return self["constrain"]
@constrain.setter
def constrain(self, val):
self["constrain"] = val
@property
def constraintoward(self):
"""
If this axis needs to be compressed (either due to its own
`scaleanchor` and `scaleratio` or those of the other axis),
determines which direction we push the originally specified
plot area. Options are "left", "center" (default), and "right"
for x axes, and "top", "middle" (default), and "bottom" for y
axes.
The 'constraintoward' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right', 'top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["constraintoward"]
@constraintoward.setter
def constraintoward(self, val):
self["constraintoward"] = val
@property
def dividercolor(self):
"""
Sets the color of the dividers Only has an effect on
"multicategory" axes.
The 'dividercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["dividercolor"]
@dividercolor.setter
def dividercolor(self, val):
self["dividercolor"] = val
@property
def dividerwidth(self):
"""
Sets the width (in px) of the dividers Only has an effect on
"multicategory" axes.
The 'dividerwidth' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dividerwidth"]
@dividerwidth.setter
def dividerwidth(self, val):
self["dividerwidth"] = val
@property
def domain(self):
"""
Sets the domain of this axis (in plot fraction).
The 'domain' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'domain[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'domain[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def fixedrange(self):
"""
Determines whether or not this axis is zoom-able. If true, then
zoom is disabled.
The 'fixedrange' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["fixedrange"]
@fixedrange.setter
def fixedrange(self, val):
self["fixedrange"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
    self["griddash"] = val

@property
def gridwidth(self):
    """
    Width (in px) of the grid lines.

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["gridwidth"]

@gridwidth.setter
def gridwidth(self, val):
    self["gridwidth"] = val

@property
def hoverformat(self):
    """
    Hover text formatting rule, expressed in d3's formatting
    mini-languages (numbers:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format, dates:
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format).
    Two extensions to d3's date formatter are supported: "%h" (half of
    the year as a decimal number) and "%{n}f" (fractional seconds with
    n digits); e.g. *2016-10-13 09:15:23.456* with "%H~%M~%S.%2f"
    displays as "09~15~23.46".

    Accepts a string (a number is coerced to one).

    Returns
    -------
    str
    """
    return self["hoverformat"]

@hoverformat.setter
def hoverformat(self, val):
    self["hoverformat"] = val

@property
def insiderange(self):
    """
    Desired inside range of this axis (excluding the labels) when
    `ticklabelposition` of the anchored axis has "inside". Not
    implemented for axes with `type` "log"; ignored when `range` is
    provided.

    Accepts a list or tuple of 2 elements, each of any type.

    Returns
    -------
    list
    """
    return self["insiderange"]

@insiderange.setter
def insiderange(self, val):
    self["insiderange"] = val

@property
def labelalias(self):
    """
    Replacement text for specific tick or hover labels, e.g.
    {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada.
    Keys must match the shown labels exactly, after any tickprefix or
    ticksuffix is applied; note negative numbers use the minus sign
    (U+2212), so use &minus;1 rather than -1. Works with any axis
    type; keys and values may contain html-like tags or MathJax.

    Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["labelalias"]

@labelalias.setter
def labelalias(self, val):
    self["labelalias"] = val

@property
def layer(self):
    """
    Layer on which this axis is displayed. With *above traces* the
    axis is drawn above all the subplot's traces; with *below traces*
    it is drawn below them but above the grid lines — useful with
    scatter-like traces that set `cliponaxis` to False so markers
    and/or text nodes appear above the axis.

    One of: 'above traces', 'below traces'.

    Returns
    -------
    Any
    """
    return self["layer"]

@layer.setter
def layer(self, val):
    self["layer"] = val

@property
def linecolor(self):
    """
    Axis line color.

    Accepts a hex string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or
    hsv/hsva string, or a named CSS color
    (see https://plotly.com/python/css-colors/).

    Returns
    -------
    str
    """
    return self["linecolor"]

@linecolor.setter
def linecolor(self, val):
    self["linecolor"] = val

@property
def linewidth(self):
    """
    Width (in px) of the axis line.

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["linewidth"]

@linewidth.setter
def linewidth(self, val):
    self["linewidth"] = val
@property
def matches(self):
    """
    If set to another axis id (e.g. `x2`, `y`), this axis' range
    matches the range of the corresponding axis in data-coordinates
    space, and the axes share auto-range values, category lists and
    histogram auto-bins. Setting axes simultaneously in both a
    `scaleanchor` and a `matches` constraint is currently forbidden,
    and matching axes must have the same `type`.

    Accepts a string matching '^x([2-9]|[1-9][0-9]+)?( domain)?$' or
    '^y([2-9]|[1-9][0-9]+)?( domain)?$'.

    Returns
    -------
    Any
    """
    return self["matches"]

@matches.setter
def matches(self, val):
    self["matches"] = val

@property
def maxallowed(self):
    """
    Maximum range of this axis.

    Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["maxallowed"]

@maxallowed.setter
def maxallowed(self, val):
    self["maxallowed"] = val

@property
def minallowed(self):
    """
    Minimum range of this axis.

    Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["minallowed"]

@minallowed.setter
def minallowed(self, val):
    self["minallowed"] = val

@property
def minexponent(self):
    """
    Hide the SI prefix for 10^n if |n| is below this number. Only
    effective when `tickformat` is "SI" or "B".

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["minexponent"]

@minexponent.setter
def minexponent(self, val):
    self["minexponent"] = val

@property
def minor(self):
    """
    Minor tick/grid settings for this axis.

    Accepts an instance of :class:`plotly.graph_objs.layout.xaxis.Minor`
    or a dict of properties passed to the Minor constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Minor
    """
    return self["minor"]

@minor.setter
def minor(self, val):
    self["minor"] = val

@property
def minorloglabels(self):
    """
    How minor log labels are displayed: *small digits* shows the small
    digits (2 or 5), "complete" shows complete digits, and "none"
    shows no labels.

    One of: 'small digits', 'complete', 'none'.

    Returns
    -------
    Any
    """
    return self["minorloglabels"]

@minorloglabels.setter
def minorloglabels(self, val):
    self["minorloglabels"] = val

@property
def mirror(self):
    """
    Whether the axis lines or/and ticks are mirrored to the opposite
    side of the plotting area. True mirrors the axis lines; "ticks"
    mirrors lines and ticks; False disables mirroring; "all" mirrors
    axis lines on all shared-axes subplots; "allticks" mirrors lines
    and ticks on all shared-axes subplots.

    One of: True, 'ticks', False, 'all', 'allticks'.

    Returns
    -------
    Any
    """
    return self["mirror"]

@mirror.setter
def mirror(self, val):
    self["mirror"] = val

@property
def modebardisable(self):
    """
    Disables certain modebar buttons for this axis: "autoscale"
    disables the autoscale buttons, "zoominout" disables the zoom-in
    and zoom-out buttons.

    A flaglist: any combination of ['autoscale', 'zoominout'] joined
    with '+' (e.g. 'autoscale+zoominout'), OR exactly 'none'.

    Returns
    -------
    Any
    """
    return self["modebardisable"]

@modebardisable.setter
def modebardisable(self, val):
    self["modebardisable"] = val

@property
def nticks(self):
    """
    Maximum number of ticks for this axis; the actual number is
    chosen automatically to be <= `nticks`. Only effective when
    `tickmode` is "auto".

    Accepts an int (or float cast to int) in
    [0, 9223372036854775807].

    Returns
    -------
    int
    """
    return self["nticks"]

@nticks.setter
def nticks(self, val):
    self["nticks"] = val

@property
def overlaying(self):
    """
    If set to a same-letter axis id, this axis is overlaid on top of
    the corresponding same-letter axis, with traces and axes visible
    for both. If False, this axis does not overlay any same-letter
    axes; in that case, for axes with overlapping domains only the
    highest-numbered axis is visible.

    Either 'free', or a string matching
    '^x([2-9]|[1-9][0-9]+)?( domain)?$' or
    '^y([2-9]|[1-9][0-9]+)?( domain)?$'.

    Returns
    -------
    Any
    """
    return self["overlaying"]

@overlaying.setter
def overlaying(self, val):
    self["overlaying"] = val

@property
def position(self):
    """
    Position of this axis in the plotting space (in normalized
    coordinates). Only effective when `anchor` is set to "free".

    Accepts an int or float in the interval [0, 1].

    Returns
    -------
    int|float
    """
    return self["position"]

@position.setter
def position(self, val):
    self["position"] = val
@property
def range(self):
    """
    Range of this axis. For a "log" axis, supply the log of the
    desired range (e.g. for 1 to 100, set the range from 0 to 2).
    For a "date" axis, supply date strings like date data (Date
    objects and unix milliseconds are accepted and converted to
    strings). For a "category" axis, supply numbers on the scale
    where each category gets a serial number from zero in order of
    appearance. Leaving either or both elements `null` impacts the
    default `autorange`.

    Accepts a list or tuple of 2 elements, each of any type.

    Returns
    -------
    list
    """
    return self["range"]

@range.setter
def range(self, val):
    self["range"] = val

@property
def rangebreaks(self):
    """
    Range breaks for this axis.

    Accepts a list or tuple of
    :class:`plotly.graph_objs.layout.xaxis.Rangebreak` instances, or
    of dicts passed to the Rangebreak constructor.

    Returns
    -------
    tuple[plotly.graph_objs.layout.xaxis.Rangebreak]
    """
    return self["rangebreaks"]

@rangebreaks.setter
def rangebreaks(self, val):
    self["rangebreaks"] = val

@property
def rangebreakdefaults(self):
    """
    When used in a template (as
    layout.template.layout.xaxis.rangebreakdefaults), sets the
    default property values for elements of layout.xaxis.rangebreaks.

    Accepts an instance of
    :class:`plotly.graph_objs.layout.xaxis.Rangebreak` or a dict of
    properties passed to the Rangebreak constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Rangebreak
    """
    return self["rangebreakdefaults"]

@rangebreakdefaults.setter
def rangebreakdefaults(self, val):
    self["rangebreakdefaults"] = val

@property
def rangemode(self):
    """
    With "normal" the range is computed from the extrema of the input
    data; "tozero" extends the range to 0 regardless of the data;
    "nonnegative" keeps the range non-negative regardless of the
    data. Applies only to linear axes.

    One of: 'normal', 'tozero', 'nonnegative'.

    Returns
    -------
    Any
    """
    return self["rangemode"]

@rangemode.setter
def rangemode(self, val):
    self["rangemode"] = val

@property
def rangeselector(self):
    """
    Range selector settings for this axis.

    Accepts an instance of
    :class:`plotly.graph_objs.layout.xaxis.Rangeselector` or a dict
    of properties passed to the Rangeselector constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Rangeselector
    """
    return self["rangeselector"]

@rangeselector.setter
def rangeselector(self, val):
    self["rangeselector"] = val

@property
def rangeslider(self):
    """
    Range slider settings for this axis.

    Accepts an instance of
    :class:`plotly.graph_objs.layout.xaxis.Rangeslider` or a dict of
    properties passed to the Rangeslider constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Rangeslider
    """
    return self["rangeslider"]

@rangeslider.setter
def rangeslider(self, val):
    self["rangeslider"] = val

@property
def scaleanchor(self):
    """
    If set to another axis id (e.g. `x2`, `y`), this axis' range
    changes together with that axis' range so that the pixels-per-
    unit scale stays in a constant ratio: both axes remain zoomable,
    but zooming one zooms the other by the same amount, keeping a
    fixed midpoint. `constrain` and `constraintoward` control how the
    constraint is enforced. Constraints can be chained
    (`yaxis: {scaleanchor: *x*}, xaxis2: {scaleanchor: *y*}`) but only
    between axes of the same `type`; the linked axis may have the
    opposite letter (to constrain aspect ratio) or the same letter
    (to match scales across subplots). Loops are redundant and the
    last constraint encountered is ignored to avoid inconsistent
    constraints via `scaleratio`. Setting axes simultaneously in both
    a `scaleanchor` and a `matches` constraint is currently
    forbidden. Setting `false` removes a default constraint (e.g. an
    image trace sets `yaxis: {scaleanchor: "x"}` automatically to
    render pixels as squares; `yaxis: {scaleanchor: false}` removes
    it).

    Either False, or a string matching
    '^x([2-9]|[1-9][0-9]+)?( domain)?$' or
    '^y([2-9]|[1-9][0-9]+)?( domain)?$'.

    Returns
    -------
    Any
    """
    return self["scaleanchor"]

@scaleanchor.setter
def scaleanchor(self, val):
    self["scaleanchor"] = val

@property
def scaleratio(self):
    """
    If this axis is linked to another by `scaleanchor`, the pixel to
    unit scale ratio: with a value of 10, every unit on this axis
    spans 10 times as many pixels as a unit on the linked axis. Use
    e.g. for an elevation profile with an exaggerated vertical scale.

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["scaleratio"]

@scaleratio.setter
def scaleratio(self, val):
    self["scaleratio"] = val
@property
def separatethousands(self):
    """
    If "true", even 4-digit integers are separated.

    Accepts a bool (True or False).

    Returns
    -------
    bool
    """
    return self["separatethousands"]

@separatethousands.setter
def separatethousands(self, val):
    self["separatethousands"] = val

@property
def showdividers(self):
    """
    Whether dividers are drawn between the category levels of this
    axis. Only effective on "multicategory" axes.

    Accepts a bool (True or False).

    Returns
    -------
    bool
    """
    return self["showdividers"]

@showdividers.setter
def showdividers(self, val):
    self["showdividers"] = val

@property
def showexponent(self):
    """
    With "all", all exponents are shown beside their significands;
    "first" shows only the first tick's exponent; "last" shows only
    the last tick's exponent; "none" shows no exponents.

    One of: 'all', 'first', 'last', 'none'.

    Returns
    -------
    Any
    """
    return self["showexponent"]

@showexponent.setter
def showexponent(self, val):
    self["showexponent"] = val

@property
def showgrid(self):
    """
    Whether grid lines are drawn. If True, grid lines are drawn at
    every tick mark.

    Accepts a bool (True or False).

    Returns
    -------
    bool
    """
    return self["showgrid"]

@showgrid.setter
def showgrid(self, val):
    self["showgrid"] = val

@property
def showline(self):
    """
    Whether a line bounding this axis is drawn.

    Accepts a bool (True or False).

    Returns
    -------
    bool
    """
    return self["showline"]

@showline.setter
def showline(self, val):
    self["showline"] = val

@property
def showspikes(self):
    """
    Whether spikes (aka droplines) are drawn for this axis. Note:
    only takes effect when hovermode = closest.

    Accepts a bool (True or False).

    Returns
    -------
    bool
    """
    return self["showspikes"]

@showspikes.setter
def showspikes(self, val):
    self["showspikes"] = val

@property
def showticklabels(self):
    """
    Whether the tick labels are drawn.

    Accepts a bool (True or False).

    Returns
    -------
    bool
    """
    return self["showticklabels"]

@showticklabels.setter
def showticklabels(self, val):
    self["showticklabels"] = val

@property
def showtickprefix(self):
    """
    With "all", every tick label carries the prefix; "first" prefixes
    only the first tick; "last" suffixes only the last tick; "none"
    hides tick prefixes.

    One of: 'all', 'first', 'last', 'none'.

    Returns
    -------
    Any
    """
    return self["showtickprefix"]

@showtickprefix.setter
def showtickprefix(self, val):
    self["showtickprefix"] = val

@property
def showticksuffix(self):
    """
    Same as `showtickprefix` but for tick suffixes.

    One of: 'all', 'first', 'last', 'none'.

    Returns
    -------
    Any
    """
    return self["showticksuffix"]

@showticksuffix.setter
def showticksuffix(self, val):
    self["showticksuffix"] = val

@property
def side(self):
    """
    Whether a x (y) axis is positioned at the "bottom" ("left") or
    "top" ("right") of the plotting area.

    One of: 'top', 'bottom', 'left', 'right'.

    Returns
    -------
    Any
    """
    return self["side"]

@side.setter
def side(self, val):
    self["side"] = val
@property
def spikecolor(self):
    """
    Spike color; if undefined, the series color is used.

    Accepts a hex string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or
    hsv/hsva string, or a named CSS color
    (see https://plotly.com/python/css-colors/).

    Returns
    -------
    str
    """
    return self["spikecolor"]

@spikecolor.setter
def spikecolor(self, val):
    self["spikecolor"] = val

@property
def spikedash(self):
    """
    Dash style of lines: a dash type string ("solid", "dot", "dash",
    "longdash", "dashdot", or "longdashdot") or a dash length list in
    px (eg "5px,10px,2px,2px"). Length lists may also use percentages
    (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%').

    Returns
    -------
    str
    """
    return self["spikedash"]

@spikedash.setter
def spikedash(self, val):
    self["spikedash"] = val

@property
def spikemode(self):
    """
    Drawing mode for the spike line. "toaxis" draws the line from the
    data point to the axis the series is plotted on; "across" draws
    it across the entire plot area and supercedes "toaxis"; "marker"
    draws a marker dot on the axis the series is plotted on.

    A flaglist: any combination of ['toaxis', 'across', 'marker']
    joined with '+' (e.g. 'toaxis+across').

    Returns
    -------
    Any
    """
    return self["spikemode"]

@spikemode.setter
def spikemode(self, val):
    self["spikemode"] = val

@property
def spikesnap(self):
    """
    Whether spikelines are stuck to the cursor or to the closest
    datapoints.

    One of: 'data', 'cursor', 'hovered data'.

    Returns
    -------
    Any
    """
    return self["spikesnap"]

@spikesnap.setter
def spikesnap(self, val):
    self["spikesnap"] = val

@property
def spikethickness(self):
    """
    Width (in px) of the zero line.

    Accepts an int or float.

    Returns
    -------
    int|float
    """
    return self["spikethickness"]

@spikethickness.setter
def spikethickness(self, val):
    self["spikethickness"] = val

@property
def tick0(self):
    """
    Placement of the first tick on this axis; use with `dtick`. For a
    "log" axis, supply the log of the starting tick (e.g. for 100 set
    `tick0` to 2), except when `dtick`=*L<f>* (see `dtick`). For a
    "date" axis, supply a date string like date data. For a
    "category" axis, supply a number on the scale where each category
    gets a serial number from zero in order of appearance.

    Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["tick0"]

@tick0.setter
def tick0(self, val):
    self["tick0"] = val
@property
def tickangle(self):
    """
    Angle of the tick labels with respect to the horizontal; e.g.
    a `tickangle` of -90 draws the tick labels vertically.

    An angle (in degrees) specified as a number between -180 and 180;
    values outside this range are converted to the equivalent value
    (e.g. 270 becomes -90).

    Returns
    -------
    int|float
    """
    return self["tickangle"]

@tickangle.setter
def tickangle(self, val):
    self["tickangle"] = val

@property
def tickcolor(self):
    """
    Tick color.

    Accepts a hex string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or
    hsv/hsva string, or a named CSS color
    (see https://plotly.com/python/css-colors/).

    Returns
    -------
    str
    """
    return self["tickcolor"]

@tickcolor.setter
def tickcolor(self, val):
    self["tickcolor"] = val

@property
def tickfont(self):
    """
    Tick font.

    Accepts an instance of
    :class:`plotly.graph_objs.layout.xaxis.Tickfont` or a dict of
    properties passed to the Tickfont constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, val):
    self["tickfont"] = val

@property
def tickformat(self):
    """
    Tick label formatting rule, expressed in d3's formatting
    mini-languages (numbers:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format, dates:
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format).
    Two extensions to d3's date formatter are supported: "%h" (half
    of the year as a decimal number) and "%{n}f" (fractional seconds
    with n digits); e.g. *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" displays as "09~15~23.46".

    Accepts a string (a number is coerced to one).

    Returns
    -------
    str
    """
    return self["tickformat"]

@tickformat.setter
def tickformat(self, val):
    self["tickformat"] = val

@property
def tickformatstops(self):
    """
    Tick format stops for this axis.

    Accepts a list or tuple of
    :class:`plotly.graph_objs.layout.xaxis.Tickformatstop` instances,
    or of dicts passed to the Tickformatstop constructor.

    Returns
    -------
    tuple[plotly.graph_objs.layout.xaxis.Tickformatstop]
    """
    return self["tickformatstops"]

@tickformatstops.setter
def tickformatstops(self, val):
    self["tickformatstops"] = val

@property
def tickformatstopdefaults(self):
    """
    When used in a template (as
    layout.template.layout.xaxis.tickformatstopdefaults), sets the
    default property values for elements of
    layout.xaxis.tickformatstops.

    Accepts an instance of
    :class:`plotly.graph_objs.layout.xaxis.Tickformatstop` or a dict
    of properties passed to the Tickformatstop constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Tickformatstop
    """
    return self["tickformatstopdefaults"]

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
    self["tickformatstopdefaults"] = val
@property
def ticklabelindex(self):
    """
    Only for axes with `type` "date" or "linear". Instead of drawing
    the major tick label, draw the label for the minor tick that is n
    positions away from the major tick; e.g. `ticklabelindex` -1
    always draws the label for the minor tick before each major tick.
    Useful on date axes with `ticklabelmode` "period" to label the
    period that ends with each major tick instead of the one that
    begins there.

    Accepts an int (or float cast to int), or a tuple, list, or
    one-dimensional numpy array of such values.

    Returns
    -------
    int|numpy.ndarray
    """
    return self["ticklabelindex"]

@ticklabelindex.setter
def ticklabelindex(self, val):
    self["ticklabelindex"] = val

@property
def ticklabelindexsrc(self):
    """
    Source reference on Chart Studio Cloud for `ticklabelindex`.

    Accepts a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ticklabelindexsrc"]

@ticklabelindexsrc.setter
def ticklabelindexsrc(self, val):
    self["ticklabelindexsrc"] = val

@property
def ticklabelmode(self):
    """
    Where tick labels are drawn with respect to their corresponding
    ticks and grid lines. Only effective for axes of `type` "date";
    with "period", tick labels are drawn in the middle of the period
    between ticks.

    One of: 'instant', 'period'.

    Returns
    -------
    Any
    """
    return self["ticklabelmode"]

@ticklabelmode.setter
def ticklabelmode(self, val):
    self["ticklabelmode"] = val

@property
def ticklabeloverflow(self):
    """
    How tick labels that would overflow either the graph div or the
    axis domain are handled. The default for inside tick labels is
    *hide past domain*; otherwise on "category" and "multicategory"
    axes the default is "allow"; in other cases the default is
    *hide past div*.

    One of: 'allow', 'hide past div', 'hide past domain'.

    Returns
    -------
    Any
    """
    return self["ticklabeloverflow"]

@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
    self["ticklabeloverflow"] = val

@property
def ticklabelposition(self):
    """
    Where tick labels are drawn with respect to the axis. Top or
    bottom has no effect on x axes or when `ticklabelmode` is
    "period" or `tickson` is "boundaries"; similarly, left or right
    has no effect on y axes or when `ticklabelmode` is "period" or
    `tickson` is "boundaries". Has no effect on "multicategory" axes.
    On axes linked by `matches` or `scaleanchor`, autorange adds no
    extra padding for inside labels so the scales can match.

    One of: 'outside', 'inside', 'outside top', 'inside top',
    'outside left', 'inside left', 'outside right', 'inside right',
    'outside bottom', 'inside bottom'.

    Returns
    -------
    Any
    """
    return self["ticklabelposition"]

@ticklabelposition.setter
def ticklabelposition(self, val):
    self["ticklabelposition"] = val

@property
def ticklabelshift(self):
    """
    Shifts the tick labels by the specified number of pixels in
    parallel to the axis; positive values move the labels in the
    positive direction of the axis.

    Accepts an int (or float cast to int).

    Returns
    -------
    int
    """
    return self["ticklabelshift"]

@ticklabelshift.setter
def ticklabelshift(self, val):
    self["ticklabelshift"] = val

@property
def ticklabelstandoff(self):
    """
    Standoff distance (in px) between the axis tick labels and their
    default position. A positive value moves the labels farther from
    the plot area if `ticklabelposition` is "outside", and deeper
    into the plot area if "inside"; a negative value works in the
    opposite direction, moving outside ticks towards the plot area
    and inside ticks towards the outside. A large enough negative
    value can even flip inside ticks outside and vice versa.

    Accepts an int (or float cast to int).

    Returns
    -------
    int
    """
    return self["ticklabelstandoff"]

@ticklabelstandoff.setter
def ticklabelstandoff(self, val):
    self["ticklabelstandoff"] = val

@property
def ticklabelstep(self):
    """
    Spacing between tick labels relative to the spacing between
    ticks: 1 (default) labels every tick, 2 shows every 2nd label, n
    labels only every nth tick. `tick0` determines which labels are
    shown. Not implemented for axes with `type` "log" or
    "multicategory", or when `tickmode` is "array".

    Accepts an int (or float cast to int) in
    [1, 9223372036854775807].

    Returns
    -------
    int
    """
    return self["ticklabelstep"]

@ticklabelstep.setter
def ticklabelstep(self, val):
    self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided). If "sync", the
number of ticks will sync with the overlayed axis set by
`overlaying` property.
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array', 'sync']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def tickson(self):
"""
Determines where ticks and grid lines are drawn with respect to
their corresponding tick labels. Only has an effect for axes of
`type` "category" or "multicategory". When set to "boundaries",
ticks and grid lines are drawn half a category to the
left/bottom of labels.
The 'tickson' property is an enumeration that may be specified as:
- One of the following enumeration values:
['labels', 'boundaries']
Returns
-------
Any
"""
return self["tickson"]
@tickson.setter
def tickson(self, val):
self["tickson"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
    """
    Chart Studio Cloud source reference for `ticktext`.

    Must be a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ticktextsrc"]

@ticktextsrc.setter
def ticktextsrc(self, val):
    self["ticktextsrc"] = val
@property
def tickvals(self):
    """
    Axis values at which ticks appear.

    Effective only when `tickmode` is set to "array"; used together
    with `ticktext`. May be given as a tuple, list, numpy array, or
    pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["tickvals"]

@tickvals.setter
def tickvals(self, val):
    self["tickvals"] = val
@property
def tickvalssrc(self):
    """
    Chart Studio Cloud source reference for `tickvals`.

    Must be a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["tickvalssrc"]

@tickvalssrc.setter
def tickvalssrc(self, val):
    self["tickvalssrc"] = val
@property
def tickwidth(self):
    """
    Width of the tick marks, in pixels.

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["tickwidth"]

@tickwidth.setter
def tickwidth(self, val):
    self["tickwidth"] = val
@property
def title(self):
    """
    Axis title settings.

    May be given as an instance of
    :class:`plotly.graph_objs.layout.xaxis.Title`, or as a dict of
    string/value properties that will be passed to the Title
    constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Title
    """
    return self["title"]

@title.setter
def title(self, val):
    self["title"] = val
@property
def type(self):
    """
    The axis type.

    By default plotly infers the type by inspecting the data of the
    traces that reference this axis.

    One of the following enumeration values is accepted:
    ['-', 'linear', 'log', 'date', 'category', 'multicategory']

    Returns
    -------
    Any
    """
    return self["type"]

@type.setter
def type(self, val):
    self["type"] = val
@property
def uirevision(self):
    """
    Persistence token for user-driven axis changes.

    Controls persistence of user edits to `range`, `autorange`, and
    `title` under an `editable: true` configuration. Defaults to
    `layout.uirevision`. Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, val):
    self["uirevision"] = val
@property
def unifiedhovertitle(self):
    """
    Unified-hover title settings for this axis.

    May be given as an instance of
    :class:`plotly.graph_objs.layout.xaxis.Unifiedhovertitle`, or as a
    dict of string/value properties that will be passed to the
    Unifiedhovertitle constructor.

    Returns
    -------
    plotly.graph_objs.layout.xaxis.Unifiedhovertitle
    """
    return self["unifiedhovertitle"]

@unifiedhovertitle.setter
def unifiedhovertitle(self, val):
    self["unifiedhovertitle"] = val
@property
def visible(self):
    """
    Single toggle that hides the axis while keeping interaction
    (such as dragging) available.

    Defaults to true when a cheater plot is present on the axis,
    false otherwise. Must be a bool (True or False).

    Returns
    -------
    bool
    """
    return self["visible"]

@visible.setter
def visible(self, val):
    self["visible"] = val
@property
def zeroline(self):
    """
    Whether a line is drawn along the 0 value of this axis.

    When True, the zero line is drawn on top of the grid lines.
    Must be a bool (True or False).

    Returns
    -------
    bool
    """
    return self["zeroline"]

@zeroline.setter
def zeroline(self, val):
    self["zeroline"] = val
@property
def zerolinecolor(self):
    """
    Line color of the zero line.

    May be specified as a hex string (e.g. '#ff0000'), an rgb/rgba
    string (e.g. 'rgb(255,0,0)'), an hsl/hsla string
    (e.g. 'hsl(0,100%,50%)'), an hsv/hsva string
    (e.g. 'hsv(0,100%,100%)'), or a named CSS color
    (see https://plotly.com/python/css-colors/ for a list).

    Returns
    -------
    str
    """
    return self["zerolinecolor"]

@zerolinecolor.setter
def zerolinecolor(self, val):
    self["zerolinecolor"] = val
@property
def zerolinelayer(self):
    """
    Layer on which the zero line is rendered.

    *above traces* displays the zeroline above all of the subplot's
    traces; *below traces* displays it below the traces, but above the
    grid lines. Limitation: "zerolinelayer" currently has no effect if
    the "zorder" property is set on any trace.

    One of the following enumeration values is accepted:
    ['above traces', 'below traces']

    Returns
    -------
    Any
    """
    return self["zerolinelayer"]

@zerolinelayer.setter
def zerolinelayer(self, val):
    self["zerolinelayer"] = val
@property
def zerolinewidth(self):
    """
    Width of the zero line, in pixels.

    Accepts an int or float.

    Returns
    -------
    int|float
    """
    return self["zerolinewidth"]

@zerolinewidth.setter
def zerolinewidth(self, val):
    self["zerolinewidth"] = val
@property
def _prop_descriptions(self):
    # Verbatim, auto-generated catalogue of per-property help text.
    # NOTE(review): presumably consumed by plotly's docstring-assembly
    # machinery (e.g. when building the __init__ docstring) — confirm
    # against the generator before editing; do not hand-edit the text.
    return """\
anchor
If set to an opposite-letter axis id (e.g. `x2`, `y`),
this axis is bound to the corresponding opposite-letter
axis. If set to "free", this axis' position is
determined by `position`.
automargin
Determines whether long tick labels automatically grow
the figure margins.
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided and it has a
value for both the lower and upper bound, `autorange`
is set to False. Using "min" applies autorange only to
set the minimum. Using "max" applies autorange only to
set the maximum. Using *min reversed* applies autorange
only to set the minimum on a reversed axis. Using *max
reversed* applies autorange only to set the maximum on
a reversed axis. Using "reversed" applies autorange on
both ends and reverses the axis direction.
autorangeoptions
:class:`plotly.graph_objects.layout.xaxis.Autorangeopti
ons` instance or dict with compatible properties
autotickangles
When `tickangle` is set to "auto", it will be set to
the first angle in this array that is large enough to
prevent label overlap.
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
constrain
If this axis needs to be compressed (either due to its
own `scaleanchor` and `scaleratio` or those of the
other axis), determines how that happens: by increasing
the "range", or by decreasing the "domain". Default is
"domain" for axes containing image traces, "range"
otherwise.
constraintoward
If this axis needs to be compressed (either due to its
own `scaleanchor` and `scaleratio` or those of the
other axis), determines which direction we push the
originally specified plot area. Options are "left",
"center" (default), and "right" for x axes, and "top",
"middle" (default), and "bottom" for y axes.
dividercolor
Sets the color of the dividers Only has an effect on
"multicategory" axes.
dividerwidth
Sets the width (in px) of the dividers Only has an
effect on "multicategory" axes.
domain
Sets the domain of this axis (in plot fraction).
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
fixedrange
Determines whether or not this axis is zoom-able. If
true, then zoom is disabled.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
insiderange
Could be used to set the desired inside range of this
axis (excluding the labels) when `ticklabelposition` of
the anchored axis has "inside". Not implemented for
axes with `type` "log". This would be ignored when
`range` is provided.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
matches
If set to another axis id (e.g. `x2`, `y`), the range
of this axis will match the range of the corresponding
axis in data-coordinates space. Moreover, matching axes
share auto-range values, category lists and histogram
auto-bins. Note that setting axes simultaneously in
both a `scaleanchor` and a `matches` constraint is
currently forbidden. Moreover, note that matching axes
must have the same `type`.
maxallowed
Determines the maximum range of this axis.
minallowed
Determines the minimum range of this axis.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
minor
:class:`plotly.graph_objects.layout.xaxis.Minor`
instance or dict with compatible properties
minorloglabels
Determines how minor log labels are displayed. If
*small digits*, small digits i.e. 2 or 5 are displayed.
If "complete", complete digits are displayed. If
"none", no labels are displayed.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disable. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
modebardisable
Disables certain modebar buttons for this axis.
"autoscale" disables the autoscale buttons, "zoominout"
disables the zoom-in and zoom-out buttons.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
overlaying
If set a same-letter axis id, this axis is overlaid on
top of the corresponding same-letter axis, with traces
and axes visible for both axes. If False, this axis
does not overlay any same-letter axes. In this case,
for axes with overlapping domains only the highest-
numbered axis will be visible.
position
Sets the position of this axis in the plotting space
(in normalized coordinates). Only has an effect if
`anchor` is set to "free".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears. Leaving either or both elements `null` impacts
the default `autorange`.
rangebreaks
A tuple of
:class:`plotly.graph_objects.layout.xaxis.Rangebreak`
instances or dicts with compatible properties
rangebreakdefaults
When used in a template (as
layout.template.layout.xaxis.rangebreakdefaults), sets
the default property values to use for elements of
layout.xaxis.rangebreaks
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
rangeselector
:class:`plotly.graph_objects.layout.xaxis.Rangeselector
` instance or dict with compatible properties
rangeslider
:class:`plotly.graph_objects.layout.xaxis.Rangeslider`
instance or dict with compatible properties
scaleanchor
If set to another axis id (e.g. `x2`, `y`), the range
of this axis changes together with the range of the
corresponding axis such that the scale of pixels per
unit is in a constant ratio. Both axes are still
zoomable, but when you zoom one, the other will zoom
the same amount, keeping a fixed midpoint. `constrain`
and `constraintoward` determine how we enforce the
constraint. You can chain these, ie `yaxis:
{scaleanchor: *x*}, xaxis2: {scaleanchor: *y*}` but you
can only link axes of the same `type`. The linked axis
can have the opposite letter (to constrain the aspect
ratio) or the same letter (to match scales across
subplots). Loops (`yaxis: {scaleanchor: *x*}, xaxis:
{scaleanchor: *y*}` or longer) are redundant and the
last constraint encountered will be ignored to avoid
possible inconsistent constraints via `scaleratio`.
Note that setting axes simultaneously in both a
`scaleanchor` and a `matches` constraint is currently
forbidden. Setting `false` allows to remove a default
constraint (occasionally, you may need to prevent a
default `scaleanchor` constraint from being applied,
eg. when having an image trace `yaxis: {scaleanchor:
"x"}` is set automatically in order for pixels to be
rendered as squares, setting `yaxis: {scaleanchor:
false}` allows to remove the constraint).
scaleratio
If this axis is linked to another by `scaleanchor`,
this determines the pixel to unit scale ratio. For
example, if this value is 10, then every unit on this
axis spans 10 times the number of pixels as a unit on
the linked axis. Use this for example to create an
elevation profile where the vertical scale is
exaggerated a fixed amount with respect to the
horizontal.
separatethousands
If "true", even 4-digit integers are separated
showdividers
Determines whether or not a dividers are drawn between
the category levels of this axis. Only has an effect on
"multicategory" axes.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Determines whether or not spikes (aka droplines) are
drawn for this axis. Note: This only takes affect when
hovermode = closest
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
side
Determines whether a x (y) axis is positioned at the
"bottom" ("left") or "top" ("right") of the plotting
area.
spikecolor
Sets the spike color. If undefined, will use the series
color
spikedash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
spikemode
Determines the drawing mode for the spike line If
"toaxis", the line is drawn from the data point to the
axis the series is plotted on. If "across", the line
is drawn across the entire plot area, and supercedes
"toaxis". If "marker", then a marker dot is drawn on
the axis the series is plotted on
spikesnap
Determines whether spikelines are stuck to the cursor
or to the closest datapoints.
spikethickness
Sets the width (in px) of the zero line.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.xaxis.Ti
ckformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as
layout.template.layout.xaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.xaxis.tickformatstops
ticklabelindex
Only for axes with `type` "date" or "linear". Instead
of drawing the major tick label, draw the label for the
minor tick that is n positions away from the major
tick. E.g. to always draw the label for the minor tick
before each major tick, choose `ticklabelindex` -1.
This is useful for date axes with `ticklabelmode`
"period" if you want to label the period that ends with
each major tick instead of the period that begins
there.
ticklabelindexsrc
Sets the source reference on Chart Studio Cloud for
`ticklabelindex`.
ticklabelmode
Determines where tick labels are drawn with respect to
their corresponding ticks and grid lines. Only has an
effect for axes of `type` "date" When set to "period",
tick labels are drawn in the middle of the period
between ticks.
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. Otherwise on "category" and
"multicategory" axes the default is "allow". In other
cases the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn with respect to
the axis. Please note that top or bottom has no effect
on x axes or when `ticklabelmode` is set to "period" or
when `tickson` is set to "boundaries". Similarly, left
or right has no effect on y axes or when
`ticklabelmode` is set to "period" or when `tickson` is
set to "boundaries". Has no effect on "multicategory"
axes. When used on axes linked by `matches` or
`scaleanchor`, no extra padding for inside labels would
be added by autorange, so that the scales could match.
ticklabelshift
Shifts the tick labels by the specified number of
pixels in parallel to the axis. Positive values move
the labels in the positive direction of the axis.
ticklabelstandoff
Sets the standoff distance (in px) between the axis
tick labels and their default position. A positive
`ticklabelstandoff` moves the labels farther away from
the plot area if `ticklabelposition` is "outside", and
deeper into the plot area if `ticklabelposition` is
"inside". A negative `ticklabelstandoff` works in the
opposite direction, moving outside ticks towards the
plot area and inside ticks towards the outside. If the
negative value is large enough, inside ticks can even
end up outside and vice versa.
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided). If
"sync", the number of ticks will sync with the
overlayed axis set by `overlaying` property.
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
tickson
Determines where ticks and grid lines are drawn with
respect to their corresponding tick labels. Only has an
effect for axes of `type` "category" or
"multicategory". When set to "boundaries", ticks and
grid lines are drawn half a category to the left/bottom
of labels.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.xaxis.Title`
instance or dict with compatible properties
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
uirevision
Controls persistence of user-driven changes in axis
`range`, `autorange`, and `title` if in `editable:
true` configuration. Defaults to `layout.uirevision`.
unifiedhovertitle
:class:`plotly.graph_objects.layout.xaxis.Unifiedhovert
itle` instance or dict with compatible properties
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn at along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinelayer
Sets the layer on which this zeroline is displayed. If
*above traces*, this zeroline is displayed above all
the subplot's traces If *below traces*, this zeroline
is displayed below all the subplot's traces, but above
the grid lines. Limitation: "zerolinelayer" currently
has no effect if the "zorder" property is set on any
trace.
zerolinewidth
Sets the width (in px) of the zero line.
"""
def __init__(
self,
arg=None,
anchor=None,
automargin=None,
autorange=None,
autorangeoptions=None,
autotickangles=None,
autotypenumbers=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
constrain=None,
constraintoward=None,
dividercolor=None,
dividerwidth=None,
domain=None,
dtick=None,
exponentformat=None,
fixedrange=None,
gridcolor=None,
griddash=None,
gridwidth=None,
hoverformat=None,
insiderange=None,
labelalias=None,
layer=None,
linecolor=None,
linewidth=None,
matches=None,
maxallowed=None,
minallowed=None,
minexponent=None,
minor=None,
minorloglabels=None,
mirror=None,
modebardisable=None,
nticks=None,
overlaying=None,
position=None,
range=None,
rangebreaks=None,
rangebreakdefaults=None,
rangemode=None,
rangeselector=None,
rangeslider=None,
scaleanchor=None,
scaleratio=None,
separatethousands=None,
showdividers=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
side=None,
spikecolor=None,
spikedash=None,
spikemode=None,
spikesnap=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabelindex=None,
ticklabelindexsrc=None,
ticklabelmode=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelshift=None,
ticklabelstandoff=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
tickson=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
type=None,
uirevision=None,
unifiedhovertitle=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinelayer=None,
zerolinewidth=None,
**kwargs,
):
"""
Construct a new XAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.XAxis`
anchor
If set to an opposite-letter axis id (e.g. `x2`, `y`),
this axis is bound to the corresponding opposite-letter
axis. If set to "free", this axis' position is
determined by `position`.
automargin
Determines whether long tick labels automatically grow
the figure margins.
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided and it has a
value for both the lower and upper bound, `autorange`
is set to False. Using "min" applies autorange only to
set the minimum. Using "max" applies autorange only to
set the maximum. Using *min reversed* applies autorange
only to set the minimum on a reversed axis. Using *max
reversed* applies autorange only to set the maximum on
a reversed axis. Using "reversed" applies autorange on
both ends and reverses the axis direction.
autorangeoptions
:class:`plotly.graph_objects.layout.xaxis.Autorangeopti
ons` instance or dict with compatible properties
autotickangles
When `tickangle` is set to "auto", it will be set to
the first angle in this array that is large enough to
prevent label overlap.
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
constrain
If this axis needs to be compressed (either due to its
own `scaleanchor` and `scaleratio` or those of the
other axis), determines how that happens: by increasing
the "range", or by decreasing the "domain". Default is
"domain" for axes containing image traces, "range"
otherwise.
constraintoward
If this axis needs to be compressed (either due to its
own `scaleanchor` and `scaleratio` or those of the
other axis), determines which direction we push the
originally specified plot area. Options are "left",
"center" (default), and "right" for x axes, and "top",
"middle" (default), and "bottom" for y axes.
dividercolor
Sets the color of the dividers Only has an effect on
"multicategory" axes.
dividerwidth
Sets the width (in px) of the dividers Only has an
effect on "multicategory" axes.
domain
Sets the domain of this axis (in plot fraction).
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
fixedrange
Determines whether or not this axis is zoom-able. If
true, then zoom is disabled.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
insiderange
Could be used to set the desired inside range of this
axis (excluding the labels) when `ticklabelposition` of
the anchored axis has "inside". Not implemented for
axes with `type` "log". This would be ignored when
`range` is provided.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
matches
If set to another axis id (e.g. `x2`, `y`), the range
of this axis will match the range of the corresponding
axis in data-coordinates space. Moreover, matching axes
share auto-range values, category lists and histogram
auto-bins. Note that setting axes simultaneously in
both a `scaleanchor` and a `matches` constraint is
currently forbidden. Moreover, note that matching axes
must have the same `type`.
maxallowed
Determines the maximum range of this axis.
minallowed
Determines the minimum range of this axis.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
minor
:class:`plotly.graph_objects.layout.xaxis.Minor`
instance or dict with compatible properties
minorloglabels
Determines how minor log labels are displayed. If
*small digits*, small digits i.e. 2 or 5 are displayed.
If "complete", complete digits are displayed. If
"none", no labels are displayed.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disable. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
modebardisable
Disables certain modebar buttons for this axis.
"autoscale" disables the autoscale buttons, "zoominout"
disables the zoom-in and zoom-out buttons.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
overlaying
If set a same-letter axis id, this axis is overlaid on
top of the corresponding same-letter axis, with traces
and axes visible for both axes. If False, this axis
does not overlay any same-letter axes. In this case,
for axes with overlapping domains only the highest-
numbered axis will be visible.
position
Sets the position of this axis in the plotting space
(in normalized coordinates). Only has an effect if
`anchor` is set to "free".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears. Leaving either or both elements `null` impacts
the default `autorange`.
rangebreaks
A tuple of
:class:`plotly.graph_objects.layout.xaxis.Rangebreak`
instances or dicts with compatible properties
rangebreakdefaults
When used in a template (as
layout.template.layout.xaxis.rangebreakdefaults), sets
the default property values to use for elements of
layout.xaxis.rangebreaks
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
rangeselector
:class:`plotly.graph_objects.layout.xaxis.Rangeselector
` instance or dict with compatible properties
rangeslider
:class:`plotly.graph_objects.layout.xaxis.Rangeslider`
instance or dict with compatible properties
scaleanchor
If set to another axis id (e.g. `x2`, `y`), the range
of this axis changes together with the range of the
corresponding axis such that the scale of pixels per
unit is in a constant ratio. Both axes are still
zoomable, but when you zoom one, the other will zoom
the same amount, keeping a fixed midpoint. `constrain`
and `constraintoward` determine how we enforce the
constraint. You can chain these, ie `yaxis:
{scaleanchor: *x*}, xaxis2: {scaleanchor: *y*}` but you
can only link axes of the same `type`. The linked axis
can have the opposite letter (to constrain the aspect
ratio) or the same letter (to match scales across
subplots). Loops (`yaxis: {scaleanchor: *x*}, xaxis:
{scaleanchor: *y*}` or longer) are redundant and the
last constraint encountered will be ignored to avoid
possible inconsistent constraints via `scaleratio`.
Note that setting axes simultaneously in both a
`scaleanchor` and a `matches` constraint is currently
forbidden. Setting `false` allows to remove a default
constraint (occasionally, you may need to prevent a
default `scaleanchor` constraint from being applied,
eg. when having an image trace `yaxis: {scaleanchor:
"x"}` is set automatically in order for pixels to be
rendered as squares, setting `yaxis: {scaleanchor:
false}` allows to remove the constraint).
scaleratio
If this axis is linked to another by `scaleanchor`,
this determines the pixel to unit scale ratio. For
example, if this value is 10, then every unit on this
axis spans 10 times the number of pixels as a unit on
the linked axis. Use this for example to create an
elevation profile where the vertical scale is
exaggerated a fixed amount with respect to the
horizontal.
separatethousands
If "true", even 4-digit integers are separated
showdividers
Determines whether or not a dividers are drawn between
the category levels of this axis. Only has an effect on
"multicategory" axes.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Determines whether or not spikes (aka droplines) are
drawn for this axis. Note: This only takes affect when
hovermode = closest
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
side
Determines whether a x (y) axis is positioned at the
"bottom" ("left") or "top" ("right") of the plotting
area.
spikecolor
Sets the spike color. If undefined, will use the series
color
spikedash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
spikemode
Determines the drawing mode for the spike line If
"toaxis", the line is drawn from the data point to the
axis the series is plotted on. If "across", the line
is drawn across the entire plot area, and supercedes
"toaxis". If "marker", then a marker dot is drawn on
the axis the series is plotted on
spikesnap
Determines whether spikelines are stuck to the cursor
or to the closest datapoints.
spikethickness
Sets the width (in px) of the zero line.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.xaxis.Ti
ckformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as
layout.template.layout.xaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.xaxis.tickformatstops
ticklabelindex
Only for axes with `type` "date" or "linear". Instead
of drawing the major tick label, draw the label for the
minor tick that is n positions away from the major
tick. E.g. to always draw the label for the minor tick
before each major tick, choose `ticklabelindex` -1.
This is useful for date axes with `ticklabelmode`
"period" if you want to label the period that ends with
each major tick instead of the period that begins
there.
ticklabelindexsrc
Sets the source reference on Chart Studio Cloud for
`ticklabelindex`.
ticklabelmode
Determines where tick labels are drawn with respect to
their corresponding ticks and grid lines. Only has an
effect for axes of `type` "date" When set to "period",
tick labels are drawn in the middle of the period
between ticks.
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. Otherwise on "category" and
"multicategory" axes the default is "allow". In other
cases the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn with respect to
the axis. Please note that top or bottom has no effect
on x axes or when `ticklabelmode` is set to "period" or
when `tickson` is set to "boundaries". Similarly, left
or right has no effect on y axes or when
`ticklabelmode` is set to "period" or when `tickson` is
set to "boundaries". Has no effect on "multicategory"
axes. When used on axes linked by `matches` or
`scaleanchor`, no extra padding for inside labels would
be added by autorange, so that the scales could match.
ticklabelshift
Shifts the tick labels by the specified number of
pixels in parallel to the axis. Positive values move
the labels in the positive direction of the axis.
ticklabelstandoff
Sets the standoff distance (in px) between the axis
tick labels and their default position. A positive
`ticklabelstandoff` moves the labels farther away from
the plot area if `ticklabelposition` is "outside", and
deeper into the plot area if `ticklabelposition` is
"inside". A negative `ticklabelstandoff` works in the
opposite direction, moving outside ticks towards the
plot area and inside ticks towards the outside. If the
negative value is large enough, inside ticks can even
end up outside and vice versa.
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided). If
"sync", the number of ticks will sync with the
overlayed axis set by `overlaying` property.
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
tickson
Determines where ticks and grid lines are drawn with
respect to their corresponding tick labels. Only has an
effect for axes of `type` "category" or
"multicategory". When set to "boundaries", ticks and
grid lines are drawn half a category to the left/bottom
of labels.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.xaxis.Title`
instance or dict with compatible properties
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
uirevision
Controls persistence of user-driven changes in axis
`range`, `autorange`, and `title` if in `editable:
true` configuration. Defaults to `layout.uirevision`.
unifiedhovertitle
:class:`plotly.graph_objects.layout.xaxis.Unifiedhovert
itle` instance or dict with compatible properties
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn at along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinelayer
Sets the layer on which this zeroline is displayed. If
*above traces*, this zeroline is displayed above all
the subplot's traces If *below traces*, this zeroline
is displayed below all the subplot's traces, but above
the grid lines. Limitation: "zerolinelayer" currently
has no effect if the "zorder" property is set on any
trace.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
XAxis
"""
super().__init__("xaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.XAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.XAxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("anchor", arg, anchor)
self._set_property("automargin", arg, automargin)
self._set_property("autorange", arg, autorange)
self._set_property("autorangeoptions", arg, autorangeoptions)
self._set_property("autotickangles", arg, autotickangles)
self._set_property("autotypenumbers", arg, autotypenumbers)
self._set_property("calendar", arg, calendar)
self._set_property("categoryarray", arg, categoryarray)
self._set_property("categoryarraysrc", arg, categoryarraysrc)
self._set_property("categoryorder", arg, categoryorder)
self._set_property("color", arg, color)
self._set_property("constrain", arg, constrain)
self._set_property("constraintoward", arg, constraintoward)
self._set_property("dividercolor", arg, dividercolor)
self._set_property("dividerwidth", arg, dividerwidth)
self._set_property("domain", arg, domain)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("fixedrange", arg, fixedrange)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("hoverformat", arg, hoverformat)
self._set_property("insiderange", arg, insiderange)
self._set_property("labelalias", arg, labelalias)
self._set_property("layer", arg, layer)
self._set_property("linecolor", arg, linecolor)
self._set_property("linewidth", arg, linewidth)
self._set_property("matches", arg, matches)
self._set_property("maxallowed", arg, maxallowed)
self._set_property("minallowed", arg, minallowed)
self._set_property("minexponent", arg, minexponent)
self._set_property("minor", arg, minor)
self._set_property("minorloglabels", arg, minorloglabels)
self._set_property("mirror", arg, mirror)
self._set_property("modebardisable", arg, modebardisable)
self._set_property("nticks", arg, nticks)
self._set_property("overlaying", arg, overlaying)
self._set_property("position", arg, position)
self._set_property("range", arg, range)
self._set_property("rangebreaks", arg, rangebreaks)
self._set_property("rangebreakdefaults", arg, rangebreakdefaults)
self._set_property("rangemode", arg, rangemode)
self._set_property("rangeselector", arg, rangeselector)
self._set_property("rangeslider", arg, rangeslider)
self._set_property("scaleanchor", arg, scaleanchor)
self._set_property("scaleratio", arg, scaleratio)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showdividers", arg, showdividers)
self._set_property("showexponent", arg, showexponent)
self._set_property("showgrid", arg, showgrid)
self._set_property("showline", arg, showline)
self._set_property("showspikes", arg, showspikes)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("side", arg, side)
self._set_property("spikecolor", arg, spikecolor)
self._set_property("spikedash", arg, spikedash)
self._set_property("spikemode", arg, spikemode)
self._set_property("spikesnap", arg, spikesnap)
self._set_property("spikethickness", arg, spikethickness)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabelindex", arg, ticklabelindex)
self._set_property("ticklabelindexsrc", arg, ticklabelindexsrc)
self._set_property("ticklabelmode", arg, ticklabelmode)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelshift", arg, ticklabelshift)
self._set_property("ticklabelstandoff", arg, ticklabelstandoff)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("tickson", arg, tickson)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("type", arg, type)
self._set_property("uirevision", arg, uirevision)
self._set_property("unifiedhovertitle", arg, unifiedhovertitle)
self._set_property("visible", arg, visible)
self._set_property("zeroline", arg, zeroline)
self._set_property("zerolinecolor", arg, zerolinecolor)
self._set_property("zerolinelayer", arg, zerolinelayer)
self._set_property("zerolinewidth", arg, zerolinewidth)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| XAxis |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/sqlite_storage.py | {
"start": 945,
"end": 1277
} | class ____(TypedDict):
base_dir: str
def _runs_directory(base: str) -> str:
return os.path.join(base, "history", "")
def _event_logs_directory(base: str) -> str:
return os.path.join(base, "history", "runs", "")
def _schedule_directory(base: str) -> str:
return os.path.join(base, "schedules")
| SqliteStorageConfig |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 163088,
"end": 184697
} | class ____(SeamlessM4TPreTrainedModel, GenerationMixin):
input_modalities = ("audio", "text")
output_modalities = ("audio", "text")
_tied_weights_keys = {
"lm_head.weight": "shared.weight",
"text_encoder.embed_tokens.weight": "shared.weight",
"text_decoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config, current_modality="text"):
r"""
current_modality (`str`, *optional*, defaults to `"text"`):
Default modality. Used to initialize the model.
"""
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4TEncoder(config)
self.speech_encoder = SeamlessM4TSpeechEncoder(config)
self.text_decoder = SeamlessM4TDecoder(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.current_modality = current_modality
if current_modality == "speech":
self.main_input_name = "input_features"
# these models already call post_init in their initialization
self.t2u_model = SeamlessM4TTextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4TCodeHifiGan(config)
# Initialize weights and apply final processing
self.post_init()
def set_modality(self, modality="text"):
if modality == "text":
self.main_input_name = "input_ids"
self.current_modality = "text"
elif modality == "speech":
self.main_input_name = "input_features"
self.current_modality = "speech"
else:
raise ValueError(f"`modality={modality}` is not a valid modality. It must be `text` or `speech`.")
def get_encoder(self):
if self.current_modality == "text":
return self.text_encoder
else:
return self.speech_encoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
input_features: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
if input_ids is None and input_features is None and inputs_embeds is None and encoder_outputs is None:
raise ValueError(
"`input_ids`,`input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not."
)
elif input_features is not None:
if input_ids is not None:
logger.warning(
"`input_ids` is not `None` but `input_features` has been given."
"`input_features` will be used in priority through the `speech_encoder`. "
"Make sure that `input_features` and `input_ids` are mutually exclusive."
)
if inputs_embeds is not None:
logger.warning(
"`inputs_embeds` is not `None` but `input_features` has been given."
"`input_features` will be used in priority through `speech_encoder`. "
"`inputs_embeds` will be ignored."
)
# if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
logger.warning(
"This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText`"
"depending on the input modality. If you want to generate speech, use the `generate` method."
)
self.set_modality("speech")
encoder_outputs = self.speech_encoder(
input_features=input_features,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif input_ids is not None or inputs_embeds is not None:
# if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
logger.warning(
"This calls the same method `forward` as `SeamlessM4TForTextToText` and `SeamlessM4TForSpeechToText`"
"depending on the input modality. If you want to generate speech, use the `generate` method."
)
self.set_modality("text")
encoder_outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
encoder_attention_mask = attention_mask
# input modality = speech so new attention mask
if self.current_modality == "speech" and attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
encoder_outputs[0].device
)
encoder_attention_mask = _compute_new_attention_mask(
hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.text_decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.Tensor] = None,
input_features: Optional[torch.Tensor] = None,
return_intermediate_token_ids: Optional[bool] = None,
tgt_lang: Optional[str] = None,
spkr_id: Optional[int] = 0,
generate_speech: Optional[bool] = True,
**kwargs,
) -> Union[torch.Tensor, SeamlessM4TGenerationOutput]:
"""
Generates translated token ids and/or translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively
perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
Note that if `generate_speech=False`, this parameter will be ignored and
the text tokens are returned.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
spkr_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
generate_speech (`bool`, *optional*, defaults to `True`):
If `False`, will only returns the text tokens and won't generate speech.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4TGenerationOutput, tuple[Tensor], ModelOutput]`:
- If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
- If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample.
- If `generate_speech=False`, it will returns `ModelOutput`.
"""
if input_ids is None and input_features is None and kwargs.get("inputs_embeds") is None:
raise ValueError(
"`input_ids`,`input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not."
)
if generate_speech and tgt_lang is None:
raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
if tgt_lang is not None:
# also accept __xxx__
tgt_lang = tgt_lang.replace("__", "")
for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(
f"""This model generation config doesn't have a `{key}` key which maps the target language
to the right token id. Make sure to load the right generation config."""
)
elif tgt_lang not in lang_code_to_id:
raise ValueError(
f"""`tgt_lang={tgt_lang}` is not supported by this model.
Please specify a `tgt_lang` in {",".join(lang_code_to_id.keys())}. Note that SeamlessM4T supports
more languages for text translation than for speech synthesis."""
)
batch_size = (
len(input_features)
if input_features is not None
else (len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")))
)
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text["output_hidden_states"] = True
kwargs_text["return_dict_in_generate"] = True
kwargs_text["output_scores"] = True
text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
# overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
if tgt_lang is not None:
# tgt_lang gets priority over decoder input ids
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text["decoder_input_ids"] = text_decoder_input_ids
# first generation
if input_features is not None:
self.set_modality("speech")
if input_ids is not None:
logger.warning(
"`input_features` and `input_ids` are both non empty. `input_features` will be used in priority "
"through the speech encoder. Make sure `input_features=None` if you want to use the text encoder."
)
text_generation_output = super().generate(input_features=input_features, **kwargs_text)
else:
self.set_modality("text")
text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text)
sequences = text_generation_output.sequences
if not generate_speech:
return text_generation_output
# prepare second generation
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
# get encoder last hidden states
if self.current_modality == "speech":
# get last_hidden_state from encoder - must do a pass through the speech encoder
encoder_hidden_states = self.speech_encoder(
input_features=input_features, attention_mask=attention_mask
).last_hidden_state
# input modality = speech so new attention mask for the decoder
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
encoder_hidden_states.device
)
attention_mask = _compute_new_attention_mask(
hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths
)
else:
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
# take care of num_return_sequences
# take most probable hidden states per batch of return_sequences
# (batch_size*num_return_sequences, ...) -> (batch_size,...)
if num_return_sequences > 1:
idx_most_probable_sequences_per_batch = text_generation_output.sequences_scores.view(batch_size, -1)
idx_most_probable_sequences_per_batch = idx_most_probable_sequences_per_batch.argmax(-1)
idx_most_probable_sequences_per_batch = (
idx_most_probable_sequences_per_batch
+ torch.arange(batch_size, device=self.device) * num_return_sequences
)
sequences = sequences[idx_most_probable_sequences_per_batch]
# get decoder last hidden state - must do a pass through the text decoder
t2u_input_embeds = self.text_decoder(
input_ids=sequences,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
# Compute new attention mask
seq_lens = (sequences != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech["attention_mask"] = t2u_model_attention_mask
# Compute t2u decoder_input_ids
t2u_decoder_input_ids = kwargs_speech.get("decoder_input_ids")
t2u_tgt_lang_id = self.generation_config.t2u_lang_code_to_id.get(tgt_lang)
t2u_decoder_input_ids = torch.tensor(
[[self.config.t2u_eos_token_id, t2u_tgt_lang_id]] * batch_size, device=self.device
)
kwargs_speech["decoder_input_ids"] = t2u_decoder_input_ids
# second generation
unit_ids = self.t2u_model.generate(inputs_embeds=t2u_input_embeds, **kwargs_speech)
output_unit_ids = unit_ids.detach().clone()
# get rid of t2u_decoder_input_ids
unit_ids = unit_ids[:, kwargs_speech["decoder_input_ids"].shape[1] :]
# replace eos per pad
unit_ids[unit_ids == self.config.t2u_eos_token_id] = self.config.t2u_pad_token_id
# offset of control symbols
unit_ids = torch.where(
unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
spkr_id = torch.tensor([[spkr_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(input_ids=unit_ids, spkr_id=spkr_id, lang_id=vocoder_tgt_lang_id)
if return_intermediate_token_ids:
return SeamlessM4TGenerationOutput(
waveform=waveform,
waveform_lengths=waveform_lengths,
sequences=sequences,
unit_sequences=output_unit_ids,
)
return waveform, waveform_lengths
__all__ = [
"SeamlessM4TForTextToSpeech",
"SeamlessM4TForSpeechToSpeech",
"SeamlessM4TForTextToText",
"SeamlessM4TForSpeechToText",
"SeamlessM4TModel",
"SeamlessM4TPreTrainedModel",
"SeamlessM4TCodeHifiGan",
"SeamlessM4THifiGan",
"SeamlessM4TTextToUnitForConditionalGeneration",
"SeamlessM4TTextToUnitModel",
]
| SeamlessM4TModel |
python | django__django | tests/admin_widgets/tests.py | {
"start": 40084,
"end": 48287
} | class ____(AdminWidgetSeleniumTestCase):
def test_show_hide_date_time_picker_widgets(self):
"""
Pressing the ESC key or clicking on a widget value closes the date and
time picker widgets.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
self.admin_login(username="super", password="secret", login_url="/")
# Open a page that has a date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# First, with the date picker widget ---------------------------------
cal_icon = self.selenium.find_element(By.ID, "calendarlink0")
# The date picker is hidden
self.assertFalse(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
# Click the calendar icon
cal_icon.click()
# The date picker is visible
self.assertTrue(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
# Press the ESC key
self.selenium.find_element(By.TAG_NAME, "body").send_keys([Keys.ESCAPE])
# The date picker is hidden again
self.assertFalse(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
# Click the calendar icon, then on the 15th of current month
cal_icon.click()
self.selenium.find_element(By.XPATH, "//a[contains(text(), '15')]").click()
self.assertFalse(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
self.assertEqual(
self.selenium.find_element(By.ID, "id_birthdate_0").get_attribute("value"),
datetime.today().strftime("%Y-%m-") + "15",
)
# Then, with the time picker widget ----------------------------------
time_icon = self.selenium.find_element(By.ID, "clocklink0")
# The time picker is hidden
self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
# Click the time icon
time_icon.click()
# The time picker is visible
self.assertTrue(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
self.assertEqual(
[
x.text
for x in self.selenium.find_elements(
By.XPATH, "//ul[@class='timelist']/li/a"
)
],
["Now", "Midnight", "6 a.m.", "Noon", "6 p.m."],
)
# Press the ESC key
self.selenium.find_element(By.TAG_NAME, "body").send_keys([Keys.ESCAPE])
# The time picker is hidden again
self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
# Click the time icon, then select the 'Noon' value
time_icon.click()
self.selenium.find_element(By.XPATH, "//a[contains(text(), 'Noon')]").click()
self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
self.assertEqual(
self.selenium.find_element(By.ID, "id_birthdate_1").get_attribute("value"),
"12:00:00",
)
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS
class. Refs #4574.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
# Open a page that has a date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# fill in the birth date.
self.selenium.find_element(By.ID, "id_birthdate_0").send_keys("2013-06-01")
# Click the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element(By.ID, "calendarin0")
tds = calendar0.find_elements(By.TAG_NAME, "td")
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute("class"), "nonday")
def test_calendar_selected_class(self):
"""
Ensure cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
# Open a page that has a date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# fill in the birth date.
self.selenium.find_element(By.ID, "id_birthdate_0").send_keys("2013-06-01")
# Click the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element(By.ID, "calendarin0")
tds = calendar0.find_elements(By.TAG_NAME, "td")
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute("class"), "selected")
self.assertEqual(selected.text, "1")
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
# Open a page that has a date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# Click the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element(By.ID, "calendarin0")
tds = calendar0.find_elements(By.TAG_NAME, "td")
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute("class") == "selected"]
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
The calendar shows the date from the input field for every locale
supported by Django.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
# Enter test data
member = Member.objects.create(
name="Bob", birthdate=datetime(1984, 5, 15), gender="M"
)
# Get month name translations for every locale
month_string = "May"
path = os.path.join(
os.path.dirname(import_module("django.contrib.admin").__file__), "locale"
)
url = reverse("admin:admin_widgets_member_change", args=(member.pk,))
with self.small_screen_size():
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation("djangojs", path, [language_code])
except OSError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption.
may_translation = month_name
expected_caption = "{:s} {:d}".format(may_translation.upper(), 1984)
# Every locale.
with override_settings(LANGUAGE_CODE=language_code):
# Open a page that has a date picker widget.
self.selenium.get(self.live_server_url + url)
# Click on the calendar icon.
self.selenium.find_element(By.ID, "calendarlink0").click()
# The right month and year are displayed.
self.wait_for_text("#calendarin0 caption", expected_caption)
@requires_tz_support
@override_settings(TIME_ZONE="Asia/Singapore")
| DateTimePickerSeleniumTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/schedule_type.py | {
"start": 155,
"end": 1545
} | class ____(Enum):
HOURLY = "HOURLY"
DAILY = "DAILY"
WEEKLY = "WEEKLY"
MONTHLY = "MONTHLY"
@property
def ordinal(self):
return {"HOURLY": 1, "DAILY": 2, "WEEKLY": 3, "MONTHLY": 4}[self.value]
def __gt__(self, other: "ScheduleType") -> bool:
check.inst_param(
other, "other", ScheduleType, "Cannot compare ScheduleType with non-ScheduleType"
)
return self.ordinal > other.ordinal
def __lt__(self, other: "ScheduleType") -> bool:
check.inst_param(
other, "other", ScheduleType, "Cannot compare ScheduleType with non-ScheduleType"
)
return self.ordinal < other.ordinal
def cron_schedule_from_schedule_type_and_offsets(
schedule_type: ScheduleType,
minute_offset: int,
hour_offset: int,
day_offset: Optional[int],
) -> str:
if schedule_type is ScheduleType.HOURLY:
return f"{minute_offset} * * * *"
elif schedule_type is ScheduleType.DAILY:
return f"{minute_offset} {hour_offset} * * *"
elif schedule_type is ScheduleType.WEEKLY:
return f"{minute_offset} {hour_offset} * * {day_offset if day_offset is not None else 0}"
elif schedule_type is ScheduleType.MONTHLY:
return f"{minute_offset} {hour_offset} {day_offset if day_offset is not None else 1} * *"
else:
check.assert_never(schedule_type)
| ScheduleType |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/forms.py | {
"start": 3232,
"end": 3456
} | class ____(ReprForm):
_date = forms.DateField()
_date_time = forms.DateTimeField()
_duration = forms.DurationField()
_time = forms.TimeField()
_split_date_time = forms.SplitDateTimeField()
| TemporalFieldForm |
python | wandb__wandb | wandb/vendor/pygments/lexers/parsers.py | {
"start": 26222,
"end": 27582
} | class ____(RegexLexer):
"""
Lexer for `ISO/IEC 14977 EBNF
<http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
grammars.
.. versionadded:: 2.0
"""
name = 'EBNF'
aliases = ['ebnf']
filenames = ['*.ebnf']
mimetypes = ['text/x-ebnf']
tokens = {
'root': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'=', Operator, 'production'),
],
'production': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'"[^"]*"', String.Double),
(r"'[^']*'", String.Single),
(r'(\?[^?]*\?)', Name.Entity),
(r'[\[\]{}(),|]', Punctuation),
(r'-', Operator),
(r';', Punctuation, '#pop'),
(r'\.', Punctuation, '#pop'),
],
'whitespace': [
(r'\s+', Text),
],
'comment_start': [
(r'\(\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^*)]', Comment.Multiline),
include('comment_start'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[*)]', Comment.Multiline),
],
'identifier': [
(r'([a-zA-Z][\w \-]*)', Keyword),
],
}
| EbnfLexer |
python | ray-project__ray | python/ray/autoscaler/v2/tests/test_instance_util.py | {
"start": 232,
"end": 12121
} | class ____(unittest.TestCase):
def test_basic(self):
# New instance.
instance = InstanceUtil.new_instance("i-123", "type_1", Instance.QUEUED)
assert instance.instance_id == "i-123"
assert instance.instance_type == "type_1"
assert instance.status == Instance.QUEUED
# Set status.
assert InstanceUtil.set_status(instance, Instance.REQUESTED)
assert instance.status == Instance.REQUESTED
# Set status with invalid status.
assert not InstanceUtil.set_status(instance, Instance.RAY_RUNNING)
assert not InstanceUtil.set_status(instance, Instance.UNKNOWN)
def test_transition_graph(self):
# Assert on each edge in the graph.
all_status = set(Instance.InstanceStatus.values())
g = InstanceUtil.get_valid_transitions()
assert g[Instance.QUEUED] == {Instance.REQUESTED}
all_status.remove(Instance.QUEUED)
assert g[Instance.REQUESTED] == {
Instance.ALLOCATED,
Instance.QUEUED,
Instance.ALLOCATION_FAILED,
}
all_status.remove(Instance.REQUESTED)
assert g[Instance.ALLOCATED] == {
Instance.RAY_INSTALLING,
Instance.RAY_RUNNING,
Instance.RAY_STOPPING,
Instance.RAY_STOPPED,
Instance.TERMINATING,
Instance.TERMINATED,
Instance.ALLOCATION_TIMEOUT,
}
all_status.remove(Instance.ALLOCATED)
assert g[Instance.RAY_INSTALLING] == {
Instance.RAY_RUNNING,
Instance.RAY_INSTALL_FAILED,
Instance.RAY_STOPPED,
Instance.TERMINATING,
Instance.TERMINATED,
}
all_status.remove(Instance.RAY_INSTALLING)
assert g[Instance.RAY_RUNNING] == {
Instance.RAY_STOP_REQUESTED,
Instance.RAY_STOPPING,
Instance.RAY_STOPPED,
Instance.TERMINATING,
Instance.TERMINATED,
}
all_status.remove(Instance.RAY_RUNNING)
assert g[Instance.ALLOCATION_TIMEOUT] == {Instance.TERMINATING}
all_status.remove(Instance.ALLOCATION_TIMEOUT)
assert g[Instance.RAY_STOP_REQUESTED] == {
Instance.RAY_STOPPING,
Instance.RAY_STOPPED,
Instance.TERMINATED,
Instance.RAY_RUNNING,
}
all_status.remove(Instance.RAY_STOP_REQUESTED)
assert g[Instance.RAY_STOPPING] == {
Instance.RAY_STOPPED,
Instance.TERMINATING,
Instance.TERMINATED,
}
all_status.remove(Instance.RAY_STOPPING)
assert g[Instance.RAY_STOPPED] == {Instance.TERMINATED, Instance.TERMINATING}
all_status.remove(Instance.RAY_STOPPED)
assert g[Instance.TERMINATING] == {
Instance.TERMINATED,
Instance.TERMINATION_FAILED,
}
all_status.remove(Instance.TERMINATING)
assert g[Instance.TERMINATION_FAILED] == {Instance.TERMINATING}
all_status.remove(Instance.TERMINATION_FAILED)
assert g[Instance.TERMINATED] == set()
all_status.remove(Instance.TERMINATED)
assert g[Instance.ALLOCATION_FAILED] == set()
all_status.remove(Instance.ALLOCATION_FAILED)
assert g[Instance.RAY_INSTALL_FAILED] == {
Instance.TERMINATED,
Instance.TERMINATING,
}
all_status.remove(Instance.RAY_INSTALL_FAILED)
assert g[Instance.UNKNOWN] == set()
all_status.remove(Instance.UNKNOWN)
assert len(all_status) == 0
@patch("time.time_ns")
def test_status_time(self, mock_time):
mock_time.return_value = 1
instance = InstanceUtil.new_instance("i-123", "type_1", Instance.QUEUED)
# OK
assert (
InstanceUtil.get_status_transition_times_ns(instance, Instance.QUEUED)[0]
== 1
)
# No filter.
assert InstanceUtil.get_status_transition_times_ns(
instance,
) == [1]
# Missing status returns empty list
assert (
InstanceUtil.get_status_transition_times_ns(instance, Instance.REQUESTED)
== []
)
# Multiple status.
mock_time.return_value = 2
InstanceUtil.set_status(instance, Instance.REQUESTED)
mock_time.return_value = 3
InstanceUtil.set_status(instance, Instance.QUEUED)
mock_time.return_value = 4
InstanceUtil.set_status(instance, Instance.REQUESTED)
assert InstanceUtil.get_status_transition_times_ns(
instance, Instance.QUEUED
) == [1, 3]
@patch("time.time_ns")
def test_get_last_status_transition(self, mock_time):
mock_time.return_value = 1
instance = InstanceUtil.new_instance("i-123", "type_1", Instance.QUEUED)
assert (
InstanceUtil.get_last_status_transition(instance).instance_status
== Instance.QUEUED
)
assert InstanceUtil.get_last_status_transition(instance).timestamp_ns == 1
mock_time.return_value = 2
InstanceUtil.set_status(instance, Instance.REQUESTED)
assert (
InstanceUtil.get_last_status_transition(instance).instance_status
== Instance.REQUESTED
)
assert InstanceUtil.get_last_status_transition(instance).timestamp_ns == 2
mock_time.return_value = 3
InstanceUtil.set_status(instance, Instance.QUEUED)
assert (
InstanceUtil.get_last_status_transition(instance).instance_status
== Instance.QUEUED
)
assert InstanceUtil.get_last_status_transition(instance).timestamp_ns == 3
assert (
InstanceUtil.get_last_status_transition(
instance, select_instance_status=Instance.REQUESTED
).instance_status
== Instance.REQUESTED
)
assert (
InstanceUtil.get_last_status_transition(
instance, select_instance_status=Instance.REQUESTED
).timestamp_ns
== 2
)
assert (
InstanceUtil.get_last_status_transition(
instance, select_instance_status=Instance.RAY_RUNNING
)
is None
)
def test_is_cloud_instance_allocated(self):
all_status = set(Instance.InstanceStatus.values())
instance = InstanceUtil.new_instance("i-123", "type_1", Instance.QUEUED)
positive_status = {
Instance.ALLOCATED,
Instance.RAY_INSTALLING,
Instance.RAY_INSTALL_FAILED,
Instance.RAY_RUNNING,
Instance.RAY_STOP_REQUESTED,
Instance.RAY_STOPPING,
Instance.RAY_STOPPED,
Instance.TERMINATING,
Instance.TERMINATION_FAILED,
Instance.ALLOCATION_TIMEOUT,
}
for s in positive_status:
instance.status = s
assert InstanceUtil.is_cloud_instance_allocated(instance.status)
all_status.remove(s)
# Unknown not possible.
all_status.remove(Instance.UNKNOWN)
for s in all_status:
instance.status = s
assert not InstanceUtil.is_cloud_instance_allocated(instance.status)
def test_is_ray_running(self):
all_statuses = set(Instance.InstanceStatus.values())
positive_statuses = {
Instance.RAY_RUNNING,
Instance.RAY_STOP_REQUESTED,
Instance.RAY_STOPPING,
}
all_statuses.remove(Instance.UNKNOWN)
for s in positive_statuses:
assert InstanceUtil.is_ray_running(s)
all_statuses.remove(s)
for s in all_statuses:
assert not InstanceUtil.is_ray_running(s)
def test_is_ray_pending(self):
all_statuses = set(Instance.InstanceStatus.values())
all_statuses.remove(Instance.UNKNOWN)
positive_statuses = {
Instance.QUEUED,
Instance.REQUESTED,
Instance.RAY_INSTALLING,
Instance.ALLOCATED,
}
for s in positive_statuses:
assert InstanceUtil.is_ray_pending(s), Instance.InstanceStatus.Name(s)
all_statuses.remove(s)
for s in all_statuses:
assert not InstanceUtil.is_ray_pending(s), Instance.InstanceStatus.Name(s)
def test_is_ray_running_reachable(self):
all_status = set(Instance.InstanceStatus.values())
positive_status = {
Instance.QUEUED,
Instance.REQUESTED,
Instance.ALLOCATED,
Instance.RAY_INSTALLING,
Instance.RAY_RUNNING,
Instance.RAY_STOP_REQUESTED,
}
for s in positive_status:
assert InstanceUtil.is_ray_running_reachable(
s
), Instance.InstanceStatus.Name(s)
all_status.remove(s)
# Unknown not possible.
all_status.remove(Instance.UNKNOWN)
for s in all_status:
assert not InstanceUtil.is_ray_running_reachable(
s
), Instance.InstanceStatus.Name(s)
def test_reachable_from(self):
def add_reachable_from(reachable, src, transitions):
reachable[src] = set()
for dst in transitions[src]:
reachable[src].add(dst)
reachable[src] |= (
reachable[dst] if reachable[dst] is not None else set()
)
expected_reachable = {s: None for s in Instance.InstanceStatus.values()}
# Error status and terminal status.
expected_reachable[Instance.ALLOCATION_FAILED] = set()
expected_reachable[Instance.UNKNOWN] = set()
expected_reachable[Instance.TERMINATED] = set()
transitions = InstanceUtil.get_valid_transitions()
# Recursively build the reachable set from terminal statuses.
add_reachable_from(expected_reachable, Instance.TERMINATION_FAILED, transitions)
add_reachable_from(expected_reachable, Instance.TERMINATING, transitions)
# Add TERMINATION_FAILED again since it's also reachable from TERMINATING.
add_reachable_from(expected_reachable, Instance.TERMINATION_FAILED, transitions)
add_reachable_from(expected_reachable, Instance.RAY_STOPPED, transitions)
add_reachable_from(expected_reachable, Instance.RAY_STOPPING, transitions)
add_reachable_from(expected_reachable, Instance.RAY_STOP_REQUESTED, transitions)
add_reachable_from(expected_reachable, Instance.RAY_RUNNING, transitions)
# Add RAY_STOP_REQUESTED again since it's also reachable from RAY_RUNNING.
add_reachable_from(expected_reachable, Instance.RAY_STOP_REQUESTED, transitions)
add_reachable_from(expected_reachable, Instance.RAY_INSTALL_FAILED, transitions)
add_reachable_from(expected_reachable, Instance.RAY_INSTALLING, transitions)
add_reachable_from(expected_reachable, Instance.ALLOCATED, transitions)
add_reachable_from(expected_reachable, Instance.REQUESTED, transitions)
add_reachable_from(expected_reachable, Instance.QUEUED, transitions)
# Add REQUESTED again since it's also reachable from QUEUED.
add_reachable_from(expected_reachable, Instance.REQUESTED, transitions)
add_reachable_from(expected_reachable, Instance.ALLOCATION_TIMEOUT, transitions)
for s, expected in expected_reachable.items():
assert InstanceUtil.get_reachable_statuses(s) == expected, (
f"reachable_from({s}) = {InstanceUtil.get_reachable_statuses(s)} "
f"!= {expected}"
)
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| InstanceUtilTest |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 80778,
"end": 82152
} | class ____(VariableTracker):
REMOVED = -1
def __init__(
self,
mutation_type=None,
# index of the registration in the side_effects owned register_hook/handle list, used during removal.
idx=None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.mutation_type = mutation_type
self.idx = idx
def call_method(self, tx: "InstructionTranslator", method_name, args, kwargs):
if method_name == "remove":
if self.idx != self.REMOVED:
tx.output.side_effects.remove_hook(self.idx)
self.idx = self.REMOVED
return variables.ConstantVariable.create(None)
super().call_method(tx, method_name, args, kwargs)
def reconstruct(self, codegen: "PyCodegen"):
if self.idx == self.REMOVED:
# Hook has already been removed, return a dummy handle
codegen.add_push_null(
lambda: codegen.load_import_from(
"torch._dynamo.utils", "invalid_removeable_handle"
)
)
codegen.extend_output(create_call_function(0, False))
return
# unreachable due to codegen.add_cache() when the hook is installed
super().reconstruct(codegen)
def python_type(self):
return RemovableHandleClass
| RemovableHandleVariable |
python | pennersr__django-allauth | allauth/socialaccount/providers/foursquare/provider.py | {
"start": 599,
"end": 1100
} | class ____(OAuth2Provider):
id = "foursquare"
name = "Foursquare"
account_class = FoursquareAccount
oauth2_adapter_class = FoursquareOAuth2Adapter
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(
first_name=data.get("firstname"),
last_name=data.get("lastname"),
email=data.get("contact").get("email"),
)
provider_classes = [FoursquareProvider]
| FoursquareProvider |
python | explosion__spaCy | spacy/lang/ca/lemmatizer.py | {
"start": 97,
"end": 2843
} | class ____(Lemmatizer):
"""
Copied from French Lemmatizer
Catalan language lemmatizer applies the default rule based lemmatization
procedure with some modifications for better Catalan language support.
The parts of speech 'ADV', 'PRON', 'DET', 'ADP' and 'AUX' are added to use
the rule-based lemmatization. As a last resort, the lemmatizer checks in
the lookup table.
"""
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
if mode == "rule":
required = ["lemma_lookup", "lemma_rules", "lemma_exc", "lemma_index"]
return (required, [])
else:
return super().get_lookups_config(mode)
def rule_lemmatize(self, token: Token) -> List[str]:
cache_key = (token.orth, token.pos)
if cache_key in self.cache:
return self.cache[cache_key]
string = token.text
univ_pos = token.pos_.lower()
if univ_pos in ("", "eol", "space"):
return [string.lower()]
elif "lemma_rules" not in self.lookups or univ_pos not in (
"noun",
"verb",
"adj",
"adp",
"adv",
"aux",
"cconj",
"det",
"pron",
"punct",
"sconj",
):
return self.lookup_lemmatize(token)
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
lookup_table = self.lookups.get_table("lemma_lookup", {})
index = index_table.get(univ_pos, {})
exceptions = exc_table.get(univ_pos, {})
rules = rules_table.get(univ_pos, [])
string = string.lower()
forms = []
if string in index:
forms.append(string)
self.cache[cache_key] = forms
return forms
forms.extend(exceptions.get(string, []))
oov_forms = []
if not forms:
for old, new in rules:
if string.endswith(old):
form = string[: len(string) - len(old)] + new
if not form:
pass
elif form in index or not form.isalpha():
forms.append(form)
else:
oov_forms.append(form)
if not forms:
forms.extend(oov_forms)
# use lookups, and fall back to the token itself
if not forms:
forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms
return forms
| CatalanLemmatizer |
python | pydantic__pydantic | pydantic/_internal/_schema_gather.py | {
"start": 458,
"end": 981
} | class ____(TypedDict):
"""Schema traversing result."""
collected_references: dict[str, DefinitionReferenceSchema | None]
"""The collected definition references.
If a definition reference schema can be inlined, it means that there is
only one in the whole core schema. As such, it is stored as the value.
Otherwise, the value is set to `None`.
"""
deferred_discriminator_schemas: list[CoreSchema]
"""The list of core schemas having the discriminator application deferred."""
| GatherResult |
python | kennethreitz__tablib | src/tablib/formats/_xls.py | {
"start": 234,
"end": 4133
} | class ____:
title = 'xls'
extensions = ('xls',)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is a readable excel file."""
try:
xlrd.open_workbook(file_contents=stream)
return True
except Exception:
pass
try:
xlrd.open_workbook(file_contents=stream.read())
return True
except Exception:
pass
try:
xlrd.open_workbook(filename=stream)
return True
except Exception:
return False
@classmethod
def export_set(cls, dataset):
"""Returns XLS representation of Dataset."""
wb = xlwt.Workbook(encoding='utf8')
ws = wb.add_sheet(dataset.title if dataset.title else 'Tablib Dataset')
cls.dset_sheet(dataset, ws)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def export_book(cls, databook):
"""Returns XLS representation of DataBook."""
wb = xlwt.Workbook(encoding='utf8')
for i, dset in enumerate(databook._datasets):
ws = wb.add_sheet(dset.title if dset.title else 'Sheet%s' % (i))
cls.dset_sheet(dset, ws)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def import_set(cls, dset, in_stream, headers=True):
"""Returns databook from XLS stream."""
dset.wipe()
xls_book = xlrd.open_workbook(file_contents=in_stream.read())
sheet = xls_book.sheet_by_index(0)
dset.title = sheet.name
def cell_value(value, type_):
if type_ == xlrd.XL_CELL_ERROR:
return xlrd.error_text_from_code[value]
elif type_ == xlrd.XL_CELL_DATE:
return xldate_as_datetime(value, xls_book.datemode)
return value
for i in range(sheet.nrows):
if i == 0 and headers:
dset.headers = sheet.row_values(0)
else:
dset.append([
cell_value(val, typ)
for val, typ in zip(sheet.row_values(i), sheet.row_types(i))
])
@classmethod
def import_book(cls, dbook, in_stream, headers=True):
"""Returns databook from XLS stream."""
dbook.wipe()
xls_book = xlrd.open_workbook(file_contents=in_stream)
for sheet in xls_book.sheets():
data = tablib.Dataset()
data.title = sheet.name
for i in range(sheet.nrows):
if i == 0 and headers:
data.headers = sheet.row_values(0)
else:
data.append(sheet.row_values(i))
dbook.add_sheet(data)
@classmethod
def dset_sheet(cls, dataset, ws):
"""Completes given worksheet from given Dataset."""
_package = dataset._package(dicts=False)
for i, sep in enumerate(dataset._separators):
_offset = i
_package.insert((sep[0] + _offset), (sep[1],))
for i, row in enumerate(_package):
for j, col in enumerate(row):
# bold headers
if (i == 0) and dataset.headers:
ws.write(i, j, col, bold)
# frozen header row
ws.panes_frozen = True
ws.horz_split_pos = 1
# bold separators
elif len(row) < dataset.width:
ws.write(i, j, col, bold)
# wrap the rest
else:
try:
if '\n' in col:
ws.write(i, j, col, wrap)
else:
ws.write(i, j, col)
except TypeError:
ws.write(i, j, col)
| XLSFormat |
python | walkccc__LeetCode | solutions/2043. Simple Bank System/2043.py | {
"start": 0,
"end": 767
} | class ____:
def __init__(self, balance: list[int]):
self.balance = balance
def transfer(self, account1: int, account2: int, money: int) -> bool:
if not self._isValid(account2):
return False
return self.withdraw(account1, money) and self.deposit(account2, money)
def deposit(self, account: int, money: int) -> bool:
if not self._isValid(account):
return False
self.balance[account - 1] += money
return True
def withdraw(self, account: int, money: int) -> bool:
if not self._isValid(account):
return False
if self.balance[account - 1] < money:
return False
self.balance[account - 1] -= money
return True
def _isValid(self, account: int) -> bool:
return 1 <= account <= len(self.balance)
| Bank |
python | numba__numba | numba/tests/test_objects.py | {
"start": 265,
"end": 1319
} | class ____(TestCase):
def test_setattr(self):
pyfunc = setattr_usecase
cfunc = jit((types.pyobject, types.int32), forceobj=True)(pyfunc)
c = C()
cfunc(c, 123)
self.assertEqual(c.x, 123)
def test_setattr_attribute_error(self):
pyfunc = setattr_usecase
cfunc = jit((types.pyobject, types.int32), forceobj=True)(pyfunc)
# Can't set undeclared slot
with self.assertRaises(AttributeError):
cfunc(object(), 123)
def test_delattr(self):
pyfunc = delattr_usecase
cfunc = jit((types.pyobject,), forceobj=True)(pyfunc)
c = C()
c.x = 123
cfunc(c)
with self.assertRaises(AttributeError):
c.x
def test_delattr_attribute_error(self):
pyfunc = delattr_usecase
cfunc = jit((types.pyobject,), forceobj=True)(pyfunc)
# Can't delete non-existing attribute
with self.assertRaises(AttributeError):
cfunc(C())
if __name__ == '__main__':
unittest.main()
| TestAttributes |
python | pennersr__django-allauth | allauth/socialaccount/apps.py | {
"start": 125,
"end": 492
} | class ____(AppConfig):
name = "allauth.socialaccount"
verbose_name = _("Social Accounts")
default_auto_field = app_settings.DEFAULT_AUTO_FIELD or "django.db.models.AutoField"
def ready(self):
from allauth.socialaccount import checks # noqa
from allauth.socialaccount.providers import registry
registry.load()
| SocialAccountConfig |
python | mlflow__mlflow | dev/check_patch_prs.py | {
"start": 295,
"end": 1272
} | class ____:
sha: str
pr_num: int
def get_commits(branch: str):
"""
Get the commits in the release branch.
"""
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"git",
"clone",
"--shallow-since=3 months ago",
"--branch",
branch,
"https://github.com/mlflow/mlflow.git",
tmpdir,
],
)
log_stdout = subprocess.check_output(
[
"git",
"log",
"--pretty=format:%H %s",
],
text=True,
cwd=tmpdir,
)
pr_rgx = re.compile(r"([a-z0-9]+) .+\s+\(#(\d+)\)$")
return [
Commit(sha=m.group(1), pr_num=int(m.group(2)))
for commit in log_stdout.splitlines()
if (m := pr_rgx.search(commit.rstrip()))
]
@dataclass(frozen=True)
| Commit |
python | pytorch__pytorch | torch/_export/passes/constant_folding.py | {
"start": 1413,
"end": 11303
} | class ____(torch.fx.Interpreter):
def __init__(
self,
gm: torch.fx.GraphModule,
skip_constructors: bool = False,
):
super().__init__(gm)
self.node_replacements: dict[torch.fx.Node, Any] = {}
self.replaced_uses: dict[torch.fx.Node, int] = collections.Counter()
self.unknown_value = object()
self.skip_constructors: bool = skip_constructors
# overwrite this to deallocate env values if their only remaining use
# is the output
self.user_to_last_uses = self.node_to_last_non_output_use()
def is_impure(self, node: torch.fx.Node) -> bool:
if (
node.target is torch.ops.prims.convert_element_type.default
and node.args[0].op == "get_attr" # type: ignore[union-attr]
and node.args[0].meta["val"].dtype == torch.int8 # type: ignore[union-attr]
and node.args[1] == torch.bfloat16
):
# For int8_weight -> dq -> bf16_weight
return True
if node.target in [
torch.ops.quantized_decomposed.dequantize_per_channel.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
torch.ops.pt2e_quant.dequantize_affine,
]:
# For the pattern fp32_weight -> q -> dq
# We only folding fp32_weight -> q
# int8_weight and leave dq in graph to be fused
return True
return False
def node_to_last_non_output_use(self):
last_non_output_use = collections.defaultdict(list)
seen_uses = set()
output_node = next(iter(reversed(self.module.graph.nodes))) # type: ignore[arg-type, union-attr]
for node in reversed(self.module.graph.nodes): # type: ignore[arg-type, union-attr]
if node.target == "output":
continue
def add_use(inp):
if inp in seen_uses:
return
seen_uses.add(inp)
last_non_output_use[node].append(inp)
# In-place is fine since we don't mutate
pytree.tree_map_only_(torch.fx.Node, add_use, (node.args, node.kwargs))
# if this node is only used in output, we want to gc it right away
if len(node.users) == 1 and output_node in node.users:
last_non_output_use[node].append(node)
return last_non_output_use
def run_node(self, node):
if node.target == "output":
# because we remove nodes from env on last non output use,
# re-define them now or we'll get error in interpreter
def set_env(arg):
self.env[arg] = self.unknown_value
# In-place is fine since we don't mutate
pytree.tree_map_only_(torch.fx.Node, set_env, node.args)
return super().run_node(node)
args, kwargs = self.fetch_args_kwargs_from_env(node)
flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)
# We need to do this weird thing because in cases where flattened_inputs
# contains a ScriptObject, equality checking results in a type error if
# the types are different.
if any(
type(self.unknown_value) is type(input_) and self.unknown_value == input_
for input_ in flattened_inputs
):
return self.unknown_value
# TODO - fix errors with this
if (
node.op == "call_function"
and node.target is aten._efficientzerotensor.default
):
return self.unknown_value
# TODO - constant folding triton kernel returns the inputs -- fix this
if (
node.op == "call_function"
and node.name == "triton_kernel_wrapper_functional_proxy"
):
return self.unknown_value
# skip constructors, since inductor generates optimal code for them already
# and turning into tensor would result in an additional global memory read
# TODO - more complicated strategy
if (
self.skip_constructors
and node.op != "get_attr"
and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
):
return self.unknown_value
# All mutations should either be removed or on inputs which we did not make constant
if (
isinstance(node.target, torch._ops.OpOverload)
and torch.Tag.nondeterministic_seeded in node.target.tags
):
return self.unknown_value
out = super().run_node(node)
if node.op != "get_attr" and isinstance(out, torch.Tensor):
if out.device.type == "meta":
return out
if not self.insertable_tensor_check(out):
return out
if self.is_impure(node):
return self.unknown_value
self.add_node_replacement(node, out)
flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)
for n in flattened_node_inps:
if not isinstance(n, torch.fx.Node):
continue
self.replaced_uses[n] += 1
for to_delete in self.user_to_last_uses.get(node, []):
if self.replaced_uses[to_delete] == len(to_delete.users):
self.node_replacements.pop(to_delete, None)
return out
def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
return True
def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
self.node_replacements[node] = tensor
def run(self): # type: ignore[override]
env = {}
for n in self.module.graph.find_nodes(op="placeholder"): # type: ignore[operator, union-attr]
env[n] = self.unknown_value
return super().run(initial_env=env)
def constant_fold(
gm: torch.fx.GraphModule,
constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
):
with torch.utils._python_dispatch._disable_current_modes():
cf = ConstantFolder(gm, skip_constructors=True)
cf.run()
for node, constant in cf.node_replacements.items():
if constraint_fn is not None and not constraint_fn(node):
continue
replace_node_with_constant(gm, node, constant)
erased_params = []
# Get all attr users by looking up the graph instead from node.users, because in this case
# _tensor_constant0 and _tensor_constant0_1 are actually refereing to the same tensor.
# opcode name target args kwargs
# ------------- ------------------- ---------------- --------------------------- --------
# placeholder arg0_1 arg0 () {}
# get_attr _tensor_constant0 state () {}
# call_function add aten.add.Tensor (arg0_1, _tensor_constant0) {}
# get_attr _tensor_constant0_1 state () {}
# call_function add_ aten.add_.Tensor (_tensor_constant0_1, 1) {}
# output output output ([add],) {}
get_attr_node_users = defaultdict(list)
for node in gm.graph.nodes:
if node.op == "get_attr":
get_attr_node_users[node.target].extend(node.users.keys())
for node in gm.graph.find_nodes(op="get_attr"):
if node.op == "get_attr" and len(get_attr_node_users[node.target]) == 0:
if hasattr(gm, node.target):
delattr(gm, node.target)
erased_params.append(node)
for node in erased_params:
gm.graph.erase_node(node)
gm.graph.eliminate_dead_code()
gm.graph.lint()
gm.recompile()
def constant_graph_tag(gm: torch.fx.GraphModule) -> None:
with torch.utils._python_dispatch._disable_current_modes():
cf = ConstantFolder(gm, skip_constructors=True)
cf.run()
for node in gm.graph.nodes:
if (
node.op == "get_attr"
or node in cf.node_replacements
or node in cf.replaced_uses
):
node.meta[META_TAG] = CONST_MODULE_TAG
else:
node.meta[META_TAG] = MODULE_TAG
def run_and_get_constant_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
"""
Construct a GraphModule which corresponds to the part which could be
constant folded in provided gm.
"""
constant_graph_tag(gm)
# We rewrite the tags, if it's a constant being directly consumed, without
# any folding opportunity, we keep it in main gm.
for node in gm.graph.find_nodes(op="get_attr"):
used_to_fold = False
for u in node.users:
if u.meta[META_TAG] == CONST_MODULE_TAG:
used_to_fold = True
break
if not used_to_fold:
node.meta[META_TAG] = MODULE_TAG
new_graph = torch.fx.Graph()
node_remapping: dict[torch.fx.Node, torch.fx.Node] = {}
output_nodes = []
for node in gm.graph.nodes:
if node.meta[META_TAG] == MODULE_TAG:
continue
new_node = new_graph.node_copy(node, lambda x: node_remapping[x])
node_remapping[node] = new_node
for user in node.users:
if user.meta[META_TAG] == MODULE_TAG:
output_nodes.append(new_node)
break
new_graph.output(tuple(output_nodes))
new_graph.lint()
new_gm = torch.fx.GraphModule(gm, new_graph)
return new_gm
| ConstantFolder |
python | pytorch__pytorch | test/inductor/test_minifier.py | {
"start": 4526,
"end": 5132
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, arg0_1):
log1p = torch.ops.aten.log1p.default(arg0_1); arg0_1 = None
return (log1p,)""",
)
# FP accuracy will refuse to promote the logical_not on the outputs,
# and so you'll get to the relu (unless the minifier somehow tries
# removing entire suffix except the log1p first!)
res = self._run_full_test(run_code, "aot", "AccuracyError", isolate=False)
self.assertExpectedInline(
res.repro_module(),
"""\
| Repro |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/query.py | {
"start": 119567,
"end": 119902
} | class ____(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(
self,
query: Query[Any],
values: Dict[_DMLColumnArgument, Any],
update_kwargs: Optional[Dict[Any, Any]],
):
super().__init__(query)
self.values = values
self.update_kwargs = update_kwargs
| BulkUpdate |
python | ray-project__ray | python/ray/tune/examples/pbt_dcgan_mnist/common.py | {
"start": 3291,
"end": 8107
} | class ____(nn.Module):
"""
LeNet for MNist classification, used for inception_score
"""
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def inception_score(imgs, mnist_model_ref, batch_size=32, splits=1):
N = len(imgs)
dtype = torch.FloatTensor
dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
cm = ray.get(mnist_model_ref) # Get the mnist model from Ray object store.
up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype)
def get_pred(x):
x = up(x)
x = cm(x)
return F.softmax(x).data.cpu().numpy()
preds = np.zeros((N, 10))
for i, batch in enumerate(dataloader, 0):
batch = batch.type(dtype)
batchv = Variable(batch)
batch_size_i = batch.size()[0]
preds[i * batch_size : i * batch_size + batch_size_i] = get_pred(batchv)
# Now compute the mean kl-div
split_scores = []
for k in range(splits):
part = preds[k * (N // splits) : (k + 1) * (N // splits), :]
py = np.mean(part, axis=0)
scores = []
for i in range(part.shape[0]):
pyx = part[i, :]
scores.append(entropy(pyx, py))
split_scores.append(np.exp(np.mean(scores)))
return np.mean(split_scores), np.std(split_scores)
# __INCEPTION_SCORE_end__
def train_func(
netD,
netG,
optimG,
optimD,
criterion,
dataloader,
iteration,
device,
mnist_model_ref,
):
real_label = 1
fake_label = 0
for i, data in enumerate(dataloader, 0):
if i >= train_iterations_per_step:
break
netD.zero_grad()
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
output = netD(real_cpu).view(-1)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
noise = torch.randn(b_size, nz, 1, 1, device=device)
fake = netG(noise)
label.fill_(fake_label)
output = netD(fake.detach()).view(-1)
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimD.step()
netG.zero_grad()
label.fill_(real_label)
output = netD(fake).view(-1)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
optimG.step()
is_score, is_std = inception_score(fake, mnist_model_ref)
# Output training stats
if iteration % 10 == 0:
print(
"[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z))"
": %.4f / %.4f \tInception score: %.4f"
% (
iteration,
len(dataloader),
errD.item(),
errG.item(),
D_x,
D_G_z1,
D_G_z2,
is_score,
)
)
return errG.item(), errD.item(), is_score
def plot_images(dataloader):
# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Original Images")
plt.imshow(
np.transpose(
vutils.make_grid(real_batch[0][:64], padding=2, normalize=True).cpu(),
(1, 2, 0),
)
)
plt.show()
def demo_gan(checkpoint_paths):
img_list = []
fixed_noise = torch.randn(64, nz, 1, 1)
for path in checkpoint_paths:
checkpoint_dict = torch.load(os.path.join(path, "checkpoint.pt"))
loadedG = Generator()
loadedG.load_state_dict(checkpoint_dict["netGmodel"])
with torch.no_grad():
fake = loadedG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list]
ani = animation.ArtistAnimation(
fig, ims, interval=1000, repeat_delay=1000, blit=True
)
ani.save("./generated.gif", writer="imagemagick", dpi=72)
plt.show()
| Net |
python | pytorch__pytorch | test/profiler/test_cpp_thread.py | {
"start": 1103,
"end": 2375
} | class ____(cpp.ProfilerEventHandler):
def onIterationStart(self, iteration: int) -> None:
global KinetoProfiler, IterationCount
# it is important to start the profiler on the same thread that step() is called
# and yes, onIterationStart() will always be called on the same thread
if iteration == 0:
# this also means step() starts on iteration 1, not 0
KinetoProfiler.start()
blueprint("starting kineto profiler")
elif iteration == IterationCount - 1:
KinetoProfiler.stop()
blueprint("stopping kineto profiler")
else:
blueprint("stepping kineto profiler")
KinetoProfiler.step()
def emulateTraining(self, iteration: int, thread_id: int) -> None:
global device
# blueprint(f"training iteration {iteration} in thread {thread_id}")
torch_device = getattr(torch, device)
assert hasattr(torch_device, "synchronize")
sync_func = torch_device.synchronize
with torch.autograd.profiler.record_function("user_function"):
a = torch.ones(1, device=device)
b = torch.ones(1, device=device)
torch.add(a, b).cpu()
sync_func()
| PythonProfilerEventHandler |
python | doocs__leetcode | solution/0300-0399/0392.Is Subsequence/Solution.py | {
"start": 0,
"end": 226
} | class ____:
def isSubsequence(self, s: str, t: str) -> bool:
i = j = 0
while i < len(s) and j < len(t):
if s[i] == t[j]:
i += 1
j += 1
return i == len(s)
| Solution |
python | euske__pdfminer | pdfminer/pdfinterp.py | {
"start": 6274,
"end": 9324
} | class ____(PSStackParser):
def __init__(self, streams):
self.streams = streams
self.istream = 0
PSStackParser.__init__(self, None)
return
def fillfp(self):
if not self.fp:
if self.istream < len(self.streams):
strm = stream_value(self.streams[self.istream])
self.istream += 1
else:
raise PSEOF('Unexpected EOF, file truncated?')
self.fp = BytesIO(strm.get_data())
return
def seek(self, pos):
self.fillfp()
PSStackParser.seek(self, pos)
return
def fillbuf(self):
if self.charpos < len(self.buf):
return
while 1:
self.fillfp()
self.bufpos = self.fp.tell()
self.buf = self.fp.read(self.BUFSIZ)
if self.buf:
break
self.fp = None
self.charpos = 0
return
def get_inline_data(self, pos, target=b'EI'):
self.seek(pos)
i = 0
data = b''
while i <= len(target):
self.fillbuf()
if i:
c = self.buf[self.charpos:self.charpos+1]
data += c
self.charpos += 1
if len(target) <= i and c.isspace():
i += 1
elif i < len(target) and c == target[i:i+1]:
i += 1
else:
i = 0
else:
try:
j = self.buf.index(target[0], self.charpos)
#print('found', (0, self.buf[j:j+10]))
data += self.buf[self.charpos:j+1]
self.charpos = j+1
i = 1
except ValueError:
data += self.buf[self.charpos:]
self.charpos = len(self.buf)
data = data[:-(len(target)+1)] # strip the last part
data = re.sub(br'(\x0d\x0a|[\x0d\x0a])$', b'', data)
return (pos, data)
def flush(self):
self.add_results(*self.popall())
return
KEYWORD_BI = KWD(b'BI')
KEYWORD_ID = KWD(b'ID')
KEYWORD_EI = KWD(b'EI')
def do_keyword(self, pos, token):
if token is self.KEYWORD_BI:
# inline image within a content stream
self.start_type(pos, 'inline')
elif token is self.KEYWORD_ID:
try:
(_, objs) = self.end_type('inline')
if len(objs) % 2 != 0:
raise PSTypeError('Invalid dictionary construct: %r' % objs)
d = dict((literal_name(k), v) for (k, v) in choplist(2, objs))
(pos, data) = self.get_inline_data(pos+len(b'ID '))
obj = PDFStream(d, data)
self.push((pos, obj))
self.push((pos, self.KEYWORD_EI))
except PSTypeError:
if STRICT:
raise
else:
self.push((pos, token))
return
## Interpreter
##
| PDFContentParser |
python | pandas-dev__pandas | pandas/core/window/rolling.py | {
"start": 22141,
"end": 29909
} | class ____(BaseWindow):
"""
Provide the groupby windowing facilities.
"""
_grouper: BaseGrouper
_as_index: bool
_attributes: list[str] = ["_grouper"]
def __init__(
self,
obj: DataFrame | Series,
*args,
_grouper: BaseGrouper,
_as_index: bool = True,
**kwargs,
) -> None:
from pandas.core.groupby.ops import BaseGrouper
if not isinstance(_grouper, BaseGrouper):
raise ValueError("Must pass a BaseGrouper object.")
self._grouper = _grouper
self._as_index = _as_index
# GH 32262: It's convention to keep the grouping column in
# groupby.<agg_func>, but unexpected to users in
# groupby.rolling.<agg_func>
obj = obj.drop(columns=self._grouper.names, errors="ignore")
# GH 15354
if kwargs.get("step") is not None:
raise NotImplementedError("step not implemented for groupby")
super().__init__(obj, *args, **kwargs)
def _apply(
self,
func: Callable[..., Any],
name: str,
numeric_only: bool = False,
numba_args: tuple[Any, ...] = (),
**kwargs,
) -> DataFrame | Series:
result = super()._apply(
func,
name,
numeric_only,
numba_args,
**kwargs,
)
# Reconstruct the resulting MultiIndex
# 1st set of levels = group by labels
# 2nd set of levels = original DataFrame/Series index
grouped_object_index = self.obj.index
grouped_index_name = [*grouped_object_index.names]
groupby_keys = copy.copy(self._grouper.names)
result_index_names = groupby_keys + grouped_index_name
drop_columns = [
key
for key in self._grouper.names
if key not in self.obj.index.names or key is None
]
if len(drop_columns) != len(groupby_keys):
# Our result will have still kept the column in the result
result = result.drop(columns=drop_columns, errors="ignore")
codes = self._grouper.codes
levels = copy.copy(self._grouper.levels)
group_indices = self._grouper.indices.values()
if group_indices:
indexer = np.concatenate(list(group_indices))
else:
indexer = np.array([], dtype=np.intp)
codes = [c.take(indexer) for c in codes]
# if the index of the original dataframe needs to be preserved, append
# this index (but reordered) to the codes/levels from the groupby
if grouped_object_index is not None:
idx = grouped_object_index.take(indexer)
if not isinstance(idx, MultiIndex):
idx = MultiIndex.from_arrays([idx])
codes.extend(list(idx.codes))
levels.extend(list(idx.levels))
result_index = MultiIndex(
levels, codes, names=result_index_names, verify_integrity=False
)
result.index = result_index
if not self._as_index:
result = result.reset_index(level=list(range(len(groupby_keys))))
return result
def _apply_pairwise(
self,
target: DataFrame | Series,
other: DataFrame | Series | None,
pairwise: bool | None,
func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],
numeric_only: bool,
) -> DataFrame | Series:
"""
Apply the given pairwise function given 2 pandas objects (DataFrame/Series)
"""
# Manually drop the grouping column first
target = target.drop(columns=self._grouper.names, errors="ignore")
result = super()._apply_pairwise(target, other, pairwise, func, numeric_only)
# 1) Determine the levels + codes of the groupby levels
if other is not None and not all(
len(group) == len(other) for group in self._grouper.indices.values()
):
# GH 42915
# len(other) != len(any group), so must reindex (expand) the result
# from flex_binary_moment to a "transform"-like result
# per groupby combination
old_result_len = len(result)
result = concat(
[
result.take(gb_indices).reindex(result.index)
for gb_indices in self._grouper.indices.values()
]
)
gb_pairs = (
com.maybe_make_list(pair) for pair in self._grouper.indices.keys()
)
groupby_codes = []
groupby_levels = []
# e.g. [[1, 2], [4, 5]] as [[1, 4], [2, 5]]
for gb_level_pair in map(list, zip(*gb_pairs, strict=True)):
labels = np.repeat(np.array(gb_level_pair), old_result_len)
codes, levels = factorize(labels)
groupby_codes.append(codes)
groupby_levels.append(levels)
else:
# pairwise=True or len(other) == len(each group), so repeat
# the groupby labels by the number of columns in the original object
groupby_codes = self._grouper.codes
# error: Incompatible types in assignment (expression has type
# "List[Index]", variable has type "List[Union[ndarray, Index]]")
groupby_levels = self._grouper.levels # type: ignore[assignment]
group_indices = self._grouper.indices.values()
if group_indices:
indexer = np.concatenate(list(group_indices))
else:
indexer = np.array([], dtype=np.intp)
if target.ndim == 1:
repeat_by = 1
else:
repeat_by = len(target.columns)
groupby_codes = [
np.repeat(c.take(indexer), repeat_by) for c in groupby_codes
]
# 2) Determine the levels + codes of the result from super()._apply_pairwise
if isinstance(result.index, MultiIndex):
result_codes = list(result.index.codes)
result_levels = list(result.index.levels)
result_names = list(result.index.names)
else:
idx_codes, idx_levels = factorize(result.index)
result_codes = [idx_codes]
result_levels = [idx_levels]
result_names = [result.index.name]
# 3) Create the resulting index by combining 1) + 2)
result_codes = groupby_codes + result_codes
result_levels = groupby_levels + result_levels
result_names = self._grouper.names + result_names
result_index = MultiIndex(
result_levels, result_codes, names=result_names, verify_integrity=False
)
result.index = result_index
return result
def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT:
"""
Split data into blocks & return conformed data.
"""
# Ensure the object we're rolling over is monotonically sorted relative
# to the groups
# GH 36197
if not obj.empty:
groupby_order = np.concatenate(list(self._grouper.indices.values())).astype(
np.int64
)
obj = obj.take(groupby_order)
return super()._create_data(obj, numeric_only)
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried through to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
# GH 43355
subset = self.obj.set_index(self._on)
return super()._gotitem(key, ndim, subset=subset)
@set_module("pandas.api.typing")
| BaseWindowGroupby |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_date_time_02.py | {
"start": 293,
"end": 16641
} | class ____(unittest.TestCase):
"""
Test the Worksheet _convert_date_time() method against dates extracted
from Excel.
"""
def setUp(self):
self.worksheet = Worksheet()
# Dates and corresponding numbers from an Excel file.
self.excel_dates = [
("1899-12-31T", 0),
# 1900-1-1 fails for datetime.datetime due to a difference in the
# way it handles time only values and the way Excel does.
# ('1900-01-01T', 1),
("1900-02-27T", 58),
("1900-02-28T", 59),
("1900-03-01T", 61),
("1900-03-02T", 62),
("1900-03-11T", 71),
("1900-04-08T", 99),
("1900-09-12T", 256),
("1901-05-03T", 489),
("1901-10-13T", 652),
("1902-02-15T", 777),
("1902-06-06T", 888),
("1902-09-25T", 999),
("1902-09-27T", 1001),
("1903-04-26T", 1212),
("1903-08-05T", 1313),
("1903-12-31T", 1461),
("1904-01-01T", 1462),
("1904-02-28T", 1520),
("1904-02-29T", 1521),
("1904-03-01T", 1522),
("1907-02-27T", 2615),
("1907-02-28T", 2616),
("1907-03-01T", 2617),
("1907-03-02T", 2618),
("1907-03-03T", 2619),
("1907-03-04T", 2620),
("1907-03-05T", 2621),
("1907-03-06T", 2622),
("1999-01-01T", 36161),
("1999-01-31T", 36191),
("1999-02-01T", 36192),
("1999-02-28T", 36219),
("1999-03-01T", 36220),
("1999-03-31T", 36250),
("1999-04-01T", 36251),
("1999-04-30T", 36280),
("1999-05-01T", 36281),
("1999-05-31T", 36311),
("1999-06-01T", 36312),
("1999-06-30T", 36341),
("1999-07-01T", 36342),
("1999-07-31T", 36372),
("1999-08-01T", 36373),
("1999-08-31T", 36403),
("1999-09-01T", 36404),
("1999-09-30T", 36433),
("1999-10-01T", 36434),
("1999-10-31T", 36464),
("1999-11-01T", 36465),
("1999-11-30T", 36494),
("1999-12-01T", 36495),
("1999-12-31T", 36525),
("2000-01-01T", 36526),
("2000-01-31T", 36556),
("2000-02-01T", 36557),
("2000-02-29T", 36585),
("2000-03-01T", 36586),
("2000-03-31T", 36616),
("2000-04-01T", 36617),
("2000-04-30T", 36646),
("2000-05-01T", 36647),
("2000-05-31T", 36677),
("2000-06-01T", 36678),
("2000-06-30T", 36707),
("2000-07-01T", 36708),
("2000-07-31T", 36738),
("2000-08-01T", 36739),
("2000-08-31T", 36769),
("2000-09-01T", 36770),
("2000-09-30T", 36799),
("2000-10-01T", 36800),
("2000-10-31T", 36830),
("2000-11-01T", 36831),
("2000-11-30T", 36860),
("2000-12-01T", 36861),
("2000-12-31T", 36891),
("2001-01-01T", 36892),
("2001-01-31T", 36922),
("2001-02-01T", 36923),
("2001-02-28T", 36950),
("2001-03-01T", 36951),
("2001-03-31T", 36981),
("2001-04-01T", 36982),
("2001-04-30T", 37011),
("2001-05-01T", 37012),
("2001-05-31T", 37042),
("2001-06-01T", 37043),
("2001-06-30T", 37072),
("2001-07-01T", 37073),
("2001-07-31T", 37103),
("2001-08-01T", 37104),
("2001-08-31T", 37134),
("2001-09-01T", 37135),
("2001-09-30T", 37164),
("2001-10-01T", 37165),
("2001-10-31T", 37195),
("2001-11-01T", 37196),
("2001-11-30T", 37225),
("2001-12-01T", 37226),
("2001-12-31T", 37256),
("2400-01-01T", 182623),
("2400-01-31T", 182653),
("2400-02-01T", 182654),
("2400-02-29T", 182682),
("2400-03-01T", 182683),
("2400-03-31T", 182713),
("2400-04-01T", 182714),
("2400-04-30T", 182743),
("2400-05-01T", 182744),
("2400-05-31T", 182774),
("2400-06-01T", 182775),
("2400-06-30T", 182804),
("2400-07-01T", 182805),
("2400-07-31T", 182835),
("2400-08-01T", 182836),
("2400-08-31T", 182866),
("2400-09-01T", 182867),
("2400-09-30T", 182896),
("2400-10-01T", 182897),
("2400-10-31T", 182927),
("2400-11-01T", 182928),
("2400-11-30T", 182957),
("2400-12-01T", 182958),
("2400-12-31T", 182988),
("4000-01-01T", 767011),
("4000-01-31T", 767041),
("4000-02-01T", 767042),
("4000-02-29T", 767070),
("4000-03-01T", 767071),
("4000-03-31T", 767101),
("4000-04-01T", 767102),
("4000-04-30T", 767131),
("4000-05-01T", 767132),
("4000-05-31T", 767162),
("4000-06-01T", 767163),
("4000-06-30T", 767192),
("4000-07-01T", 767193),
("4000-07-31T", 767223),
("4000-08-01T", 767224),
("4000-08-31T", 767254),
("4000-09-01T", 767255),
("4000-09-30T", 767284),
("4000-10-01T", 767285),
("4000-10-31T", 767315),
("4000-11-01T", 767316),
("4000-11-30T", 767345),
("4000-12-01T", 767346),
("4000-12-31T", 767376),
("4321-01-01T", 884254),
("4321-01-31T", 884284),
("4321-02-01T", 884285),
("4321-02-28T", 884312),
("4321-03-01T", 884313),
("4321-03-31T", 884343),
("4321-04-01T", 884344),
("4321-04-30T", 884373),
("4321-05-01T", 884374),
("4321-05-31T", 884404),
("4321-06-01T", 884405),
("4321-06-30T", 884434),
("4321-07-01T", 884435),
("4321-07-31T", 884465),
("4321-08-01T", 884466),
("4321-08-31T", 884496),
("4321-09-01T", 884497),
("4321-09-30T", 884526),
("4321-10-01T", 884527),
("4321-10-31T", 884557),
("4321-11-01T", 884558),
("4321-11-30T", 884587),
("4321-12-01T", 884588),
("4321-12-31T", 884618),
("9999-01-01T", 2958101),
("9999-01-31T", 2958131),
("9999-02-01T", 2958132),
("9999-02-28T", 2958159),
("9999-03-01T", 2958160),
("9999-03-31T", 2958190),
("9999-04-01T", 2958191),
("9999-04-30T", 2958220),
("9999-05-01T", 2958221),
("9999-05-31T", 2958251),
("9999-06-01T", 2958252),
("9999-06-30T", 2958281),
("9999-07-01T", 2958282),
("9999-07-31T", 2958312),
("9999-08-01T", 2958313),
("9999-08-31T", 2958343),
("9999-09-01T", 2958344),
("9999-09-30T", 2958373),
("9999-10-01T", 2958374),
("9999-10-31T", 2958404),
("9999-11-01T", 2958405),
("9999-11-30T", 2958434),
("9999-12-01T", 2958435),
("9999-12-31T", 2958465),
]
# Dates and corresponding numbers from an Excel file.
self.excel_1904_dates = [
("1904-01-01T", 0),
("1904-01-31T", 30),
("1904-02-01T", 31),
("1904-02-29T", 59),
("1904-03-01T", 60),
("1904-03-31T", 90),
("1904-04-01T", 91),
("1904-04-30T", 120),
("1904-05-01T", 121),
("1904-05-31T", 151),
("1904-06-01T", 152),
("1904-06-30T", 181),
("1904-07-01T", 182),
("1904-07-31T", 212),
("1904-08-01T", 213),
("1904-08-31T", 243),
("1904-09-01T", 244),
("1904-09-30T", 273),
("1904-10-01T", 274),
("1904-10-31T", 304),
("1904-11-01T", 305),
("1904-11-30T", 334),
("1904-12-01T", 335),
("1904-12-31T", 365),
("1907-02-27T", 1153),
("1907-02-28T", 1154),
("1907-03-01T", 1155),
("1907-03-02T", 1156),
("1907-03-03T", 1157),
("1907-03-04T", 1158),
("1907-03-05T", 1159),
("1907-03-06T", 1160),
("1999-01-01T", 34699),
("1999-01-31T", 34729),
("1999-02-01T", 34730),
("1999-02-28T", 34757),
("1999-03-01T", 34758),
("1999-03-31T", 34788),
("1999-04-01T", 34789),
("1999-04-30T", 34818),
("1999-05-01T", 34819),
("1999-05-31T", 34849),
("1999-06-01T", 34850),
("1999-06-30T", 34879),
("1999-07-01T", 34880),
("1999-07-31T", 34910),
("1999-08-01T", 34911),
("1999-08-31T", 34941),
("1999-09-01T", 34942),
("1999-09-30T", 34971),
("1999-10-01T", 34972),
("1999-10-31T", 35002),
("1999-11-01T", 35003),
("1999-11-30T", 35032),
("1999-12-01T", 35033),
("1999-12-31T", 35063),
("2000-01-01T", 35064),
("2000-01-31T", 35094),
("2000-02-01T", 35095),
("2000-02-29T", 35123),
("2000-03-01T", 35124),
("2000-03-31T", 35154),
("2000-04-01T", 35155),
("2000-04-30T", 35184),
("2000-05-01T", 35185),
("2000-05-31T", 35215),
("2000-06-01T", 35216),
("2000-06-30T", 35245),
("2000-07-01T", 35246),
("2000-07-31T", 35276),
("2000-08-01T", 35277),
("2000-08-31T", 35307),
("2000-09-01T", 35308),
("2000-09-30T", 35337),
("2000-10-01T", 35338),
("2000-10-31T", 35368),
("2000-11-01T", 35369),
("2000-11-30T", 35398),
("2000-12-01T", 35399),
("2000-12-31T", 35429),
("2001-01-01T", 35430),
("2001-01-31T", 35460),
("2001-02-01T", 35461),
("2001-02-28T", 35488),
("2001-03-01T", 35489),
("2001-03-31T", 35519),
("2001-04-01T", 35520),
("2001-04-30T", 35549),
("2001-05-01T", 35550),
("2001-05-31T", 35580),
("2001-06-01T", 35581),
("2001-06-30T", 35610),
("2001-07-01T", 35611),
("2001-07-31T", 35641),
("2001-08-01T", 35642),
("2001-08-31T", 35672),
("2001-09-01T", 35673),
("2001-09-30T", 35702),
("2001-10-01T", 35703),
("2001-10-31T", 35733),
("2001-11-01T", 35734),
("2001-11-30T", 35763),
("2001-12-01T", 35764),
("2001-12-31T", 35794),
("2400-01-01T", 181161),
("2400-01-31T", 181191),
("2400-02-01T", 181192),
("2400-02-29T", 181220),
("2400-03-01T", 181221),
("2400-03-31T", 181251),
("2400-04-01T", 181252),
("2400-04-30T", 181281),
("2400-05-01T", 181282),
("2400-05-31T", 181312),
("2400-06-01T", 181313),
("2400-06-30T", 181342),
("2400-07-01T", 181343),
("2400-07-31T", 181373),
("2400-08-01T", 181374),
("2400-08-31T", 181404),
("2400-09-01T", 181405),
("2400-09-30T", 181434),
("2400-10-01T", 181435),
("2400-10-31T", 181465),
("2400-11-01T", 181466),
("2400-11-30T", 181495),
("2400-12-01T", 181496),
("2400-12-31T", 181526),
("4000-01-01T", 765549),
("4000-01-31T", 765579),
("4000-02-01T", 765580),
("4000-02-29T", 765608),
("4000-03-01T", 765609),
("4000-03-31T", 765639),
("4000-04-01T", 765640),
("4000-04-30T", 765669),
("4000-05-01T", 765670),
("4000-05-31T", 765700),
("4000-06-01T", 765701),
("4000-06-30T", 765730),
("4000-07-01T", 765731),
("4000-07-31T", 765761),
("4000-08-01T", 765762),
("4000-08-31T", 765792),
("4000-09-01T", 765793),
("4000-09-30T", 765822),
("4000-10-01T", 765823),
("4000-10-31T", 765853),
("4000-11-01T", 765854),
("4000-11-30T", 765883),
("4000-12-01T", 765884),
("4000-12-31T", 765914),
("4321-01-01T", 882792),
("4321-01-31T", 882822),
("4321-02-01T", 882823),
("4321-02-28T", 882850),
("4321-03-01T", 882851),
("4321-03-31T", 882881),
("4321-04-01T", 882882),
("4321-04-30T", 882911),
("4321-05-01T", 882912),
("4321-05-31T", 882942),
("4321-06-01T", 882943),
("4321-06-30T", 882972),
("4321-07-01T", 882973),
("4321-07-31T", 883003),
("4321-08-01T", 883004),
("4321-08-31T", 883034),
("4321-09-01T", 883035),
("4321-09-30T", 883064),
("4321-10-01T", 883065),
("4321-10-31T", 883095),
("4321-11-01T", 883096),
("4321-11-30T", 883125),
("4321-12-01T", 883126),
("4321-12-31T", 883156),
("9999-01-01T", 2956639),
("9999-01-31T", 2956669),
("9999-02-01T", 2956670),
("9999-02-28T", 2956697),
("9999-03-01T", 2956698),
("9999-03-31T", 2956728),
("9999-04-01T", 2956729),
("9999-04-30T", 2956758),
("9999-05-01T", 2956759),
("9999-05-31T", 2956789),
("9999-06-01T", 2956790),
("9999-06-30T", 2956819),
("9999-07-01T", 2956820),
("9999-07-31T", 2956850),
("9999-08-01T", 2956851),
("9999-08-31T", 2956881),
("9999-09-01T", 2956882),
("9999-09-30T", 2956911),
("9999-10-01T", 2956912),
("9999-10-31T", 2956942),
("9999-11-01T", 2956943),
("9999-11-30T", 2956972),
("9999-12-01T", 2956973),
("9999-12-31T", 2957003),
]
def test_convert_date_time_datetime(self):
"""Test the _convert_date_time() method with datetime objects."""
for excel_date in self.excel_dates:
test_date = datetime.datetime.strptime(excel_date[0], "%Y-%m-%dT")
got = self.worksheet._convert_date_time(test_date)
exp = excel_date[1]
self.assertEqual(exp, got)
def test_convert_date_time_date(self):
"""Test the _convert_date_time() method with date objects."""
for excel_date in self.excel_dates:
date_str = excel_date[0].rstrip("T")
(year, month, day) = date_str.split("-")
test_date = datetime.date(int(year), int(month), int(day))
got = self.worksheet._convert_date_time(test_date)
exp = excel_date[1]
self.assertEqual(exp, got)
def test_convert_date_time_1904(self):
"""Test the _convert_date_time() method with 1904 date system."""
self.worksheet.date_1904 = True
self.worksheet.epoch = datetime.datetime(1904, 1, 1)
for excel_date in self.excel_1904_dates:
date = datetime.datetime.strptime(excel_date[0], "%Y-%m-%dT")
got = self.worksheet._convert_date_time(date)
exp = excel_date[1]
self.assertEqual(exp, got)
| TestConvertDateTime |
python | apache__airflow | airflow-core/tests/unit/security/test_kerberos.py | {
"start": 1114,
"end": 12191
} | class ____:
@pytest.fixture(autouse=True)
def fresh_detect_conf_var(self):
"""Clear cache of kerberos detection function."""
detect_conf_var.cache_clear()
@pytest.mark.parametrize(
("kerberos_config", "expected_cmd"),
[
(
{("kerberos", "reinit_frequency"): "42"},
[
"kinit",
"-f",
"-a",
"-r",
"42m",
"-k",
"-t",
"keytab",
"-c",
"/tmp/airflow_krb5_ccache",
"test-principal",
],
),
(
{("kerberos", "forwardable"): "True", ("kerberos", "include_ip"): "True"},
[
"kinit",
"-f",
"-a",
"-r",
"3600m",
"-k",
"-t",
"keytab",
"-c",
"/tmp/airflow_krb5_ccache",
"test-principal",
],
),
(
{("kerberos", "forwardable"): "False", ("kerberos", "include_ip"): "False"},
[
"kinit",
"-F",
"-A",
"-r",
"3600m",
"-k",
"-t",
"keytab",
"-c",
"/tmp/airflow_krb5_ccache",
"test-principal",
],
),
],
)
@mock.patch("time.sleep", return_value=None)
@mock.patch("airflow.security.kerberos.open", mock.mock_open(read_data=b"X-CACHECONF:"))
@mock.patch("airflow.security.kerberos.subprocess")
def test_renew_from_kt(self, mock_subprocess, mock_sleep, kerberos_config, expected_cmd, caplog):
expected_cmd_text = " ".join(shlex.quote(f) for f in expected_cmd)
with conf_vars(kerberos_config), caplog.at_level(logging.INFO, logger=kerberos.log.name):
caplog.clear()
mock_subprocess.Popen.return_value.__enter__.return_value.returncode = 0
mock_subprocess.call.return_value = 0
renew_from_kt(principal="test-principal", keytab="keytab")
assert caplog.messages == [
f"Re-initialising kerberos from keytab: {expected_cmd_text}",
"Renewing kerberos ticket to work around kerberos 1.8.1: kinit -c /tmp/airflow_krb5_ccache -R",
]
assert mock_subprocess.Popen.call_args.args[0] == expected_cmd
assert mock_subprocess.mock_calls == [
mock.call.Popen(
expected_cmd,
bufsize=-1,
close_fds=True,
stderr=mock_subprocess.PIPE,
stdout=mock_subprocess.PIPE,
universal_newlines=True,
),
mock.call.Popen().__enter__(),
mock.call.Popen().__enter__().wait(),
mock.call.Popen().__exit__(None, None, None),
mock.call.call(["kinit", "-c", "/tmp/airflow_krb5_ccache", "-R"], close_fds=True),
]
@mock.patch("airflow.security.kerberos.subprocess")
@mock.patch("airflow.security.kerberos.open", mock.mock_open(read_data=b""))
def test_renew_from_kt_without_workaround(self, mock_subprocess, caplog):
mock_subprocess.Popen.return_value.__enter__.return_value.returncode = 0
mock_subprocess.call.return_value = 0
with caplog.at_level(logging.INFO, logger=kerberos.log.name):
caplog.clear()
renew_from_kt(principal="test-principal", keytab="keytab")
assert caplog.messages == [
"Re-initialising kerberos from keytab: "
"kinit -f -a -r 3600m -k -t keytab -c /tmp/airflow_krb5_ccache test-principal"
]
assert mock_subprocess.mock_calls == [
mock.call.Popen(
[
"kinit",
"-f",
"-a",
"-r",
"3600m",
"-k",
"-t",
"keytab",
"-c",
"/tmp/airflow_krb5_ccache",
"test-principal",
],
bufsize=-1,
close_fds=True,
stderr=mock_subprocess.PIPE,
stdout=mock_subprocess.PIPE,
universal_newlines=True,
),
mock.call.Popen().__enter__(),
mock.call.Popen().__enter__().wait(),
mock.call.Popen().__exit__(None, None, None),
]
@mock.patch("airflow.security.kerberos.subprocess")
def test_renew_from_kt_failed(self, mock_subprocess, caplog):
mock_subp = mock_subprocess.Popen.return_value.__enter__.return_value
mock_subp.returncode = 1
mock_subp.stdout = mock.MagicMock(name="stdout", **{"readlines.return_value": ["STDOUT"]})
mock_subp.stderr = mock.MagicMock(name="stderr", **{"readlines.return_value": ["STDERR"]})
caplog.clear()
with pytest.raises(SystemExit) as ctx:
renew_from_kt(principal="test-principal", keytab="keytab")
assert ctx.value.code == 1
log_records = [record for record in caplog.record_tuples if record[0] == kerberos.log.name]
assert len(log_records) == 2, log_records
assert [lr[1] for lr in log_records] == [logging.INFO, logging.ERROR]
assert [lr[2] for lr in log_records] == [
"Re-initialising kerberos from keytab: "
"kinit -f -a -r 3600m -k -t keytab -c /tmp/airflow_krb5_ccache test-principal",
"Couldn't reinit from keytab! `kinit` exited with 1.\nSTDOUT\nSTDERR",
]
assert mock_subprocess.mock_calls == [
mock.call.Popen(
[
"kinit",
"-f",
"-a",
"-r",
"3600m",
"-k",
"-t",
"keytab",
"-c",
"/tmp/airflow_krb5_ccache",
"test-principal",
],
bufsize=-1,
close_fds=True,
stderr=mock_subprocess.PIPE,
stdout=mock_subprocess.PIPE,
universal_newlines=True,
),
mock.call.Popen().__enter__(),
mock.call.Popen().__enter__().wait(),
mock.call.Popen().__exit__(mock.ANY, mock.ANY, mock.ANY),
]
@mock.patch("airflow.security.kerberos.subprocess")
@mock.patch("airflow.security.kerberos.open", mock.mock_open(read_data=b"X-CACHECONF:"))
@mock.patch("airflow.security.kerberos.get_hostname", return_value="HOST")
@mock.patch("time.sleep", return_value=None)
def test_renew_from_kt_failed_workaround(self, mock_sleep, mock_getfqdn, mock_subprocess, caplog):
mock_subprocess.Popen.return_value.__enter__.return_value.returncode = 0
mock_subprocess.call.return_value = 1
caplog.clear()
with pytest.raises(SystemExit) as ctx:
renew_from_kt(principal="test-principal", keytab="keytab")
assert ctx.value.code == 1
log_records = [record for record in caplog.record_tuples if record[0] == kerberos.log.name]
assert len(log_records) == 3, log_records
assert [lr[1] for lr in log_records] == [logging.INFO, logging.INFO, logging.ERROR]
assert [lr[2] for lr in log_records] == [
"Re-initialising kerberos from keytab: "
"kinit -f -a -r 3600m -k -t keytab -c /tmp/airflow_krb5_ccache test-principal",
"Renewing kerberos ticket to work around kerberos 1.8.1: kinit -c /tmp/airflow_krb5_ccache -R",
"Couldn't renew kerberos ticket in order to work around "
"Kerberos 1.8.1 issue. Please check that the ticket for 'test-principal/HOST' is still "
"renewable:\n $ kinit -f -c /tmp/airflow_krb5_ccache\n"
"If the 'renew until' date is the same as the 'valid starting' date, the ticket cannot be "
"renewed. Please check your KDC configuration, and the ticket renewal policy (maxrenewlife) for "
"the 'test-principal/HOST' and `krbtgt' principals.",
]
assert mock_subprocess.mock_calls == [
mock.call.Popen(
[
"kinit",
"-f",
"-a",
"-r",
"3600m",
"-k",
"-t",
"keytab",
"-c",
"/tmp/airflow_krb5_ccache",
"test-principal",
],
bufsize=-1,
close_fds=True,
stderr=mock_subprocess.PIPE,
stdout=mock_subprocess.PIPE,
universal_newlines=True,
),
mock.call.Popen().__enter__(),
mock.call.Popen().__enter__().wait(),
mock.call.Popen().__exit__(None, None, None),
mock.call.call(["kinit", "-c", "/tmp/airflow_krb5_ccache", "-R"], close_fds=True),
]
def test_run_without_keytab(self, caplog):
with caplog.at_level(logging.WARNING, logger=kerberos.log.name):
caplog.clear()
with pytest.raises(SystemExit) as ctx:
kerberos.run(principal="test-principal", keytab=None)
assert ctx.value.code == 0
assert caplog.messages == ["Keytab renewer not starting, no keytab configured"]
@mock.patch("airflow.security.kerberos.renew_from_kt")
@mock.patch("time.sleep", return_value=None)
def test_run(self, mock_sleep, mock_renew_from_kt):
mock_renew_from_kt.side_effect = [1, 1, SystemExit(42)]
with pytest.raises(SystemExit) as ctx:
kerberos.run(principal="test-principal", keytab="/tmp/keytab")
assert ctx.value.code == 42
assert mock_renew_from_kt.mock_calls == [
mock.call("test-principal", "/tmp/keytab"),
mock.call("test-principal", "/tmp/keytab"),
mock.call("test-principal", "/tmp/keytab"),
]
def test_get_kerberos_principal(self):
expected_principal = "test-principal"
principal = get_kerberos_principal(expected_principal)
assert principal == expected_principal
@mock.patch("airflow.security.kerberos.get_hostname", return_value="REPLACEMENT_HOST")
@mock.patch("airflow.security.kerberos.conf.get_mandatory_value", return_value="test-principal/_HOST")
def test_get_kerberos_principal_resolve_null_principal(self, get_madantory_value_mock, get_hostname_mock):
principal = get_kerberos_principal(principal=None)
assert principal == "test-principal/REPLACEMENT_HOST"
| TestKerberos |
python | doocs__leetcode | solution/1200-1299/1237.Find Positive Integer Solution for a Given Equation/Solution2.py | {
"start": 349,
"end": 764
} | class ____:
def findSolution(self, customfunction: "CustomFunction", z: int) -> List[List[int]]:
ans = []
x, y = 1, 1000
while x <= 1000 and y:
t = customfunction.f(x, y)
if t < z:
x += 1
elif t > z:
y -= 1
else:
ans.append([x, y])
x, y = x + 1, y - 1
return ans
| Solution |
python | bokeh__bokeh | src/bokeh/models/widgets/tables.py | {
"start": 20967,
"end": 22407
} | class ____(Model):
''' Table column widget.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
field = Required(String, help="""
The name of the field mapping to a column in the data source.
""")
title = Nullable(String, help="""
The title of this column. If not set, column's data field is
used instead.
""")
width = Int(300, help="""
The width or maximum width (depending on data table's configuration)
in pixels of this column.
""")
formatter = Instance(CellFormatter, InstanceDefault(StringFormatter), help="""
The cell formatter for this column. By default, a simple string
formatter is used.
""")
editor = Instance(CellEditor, InstanceDefault(StringEditor), help="""
The cell editor for this column. By default, a simple string editor
is used.
""")
sortable = Bool(True, help="""
Whether this column is sortable or not. Note that data table has
to have sorting enabled to allow sorting in general.
""")
default_sort = Enum("ascending", "descending", help="""
The default sorting order. By default ``ascending`` order is used.
""")
visible = Bool(True, help="""
Whether this column should be displayed or not.
""")
sorter = Nullable(Instance(Comparison), help="""
""")
@abstract
| TableColumn |
python | walkccc__LeetCode | solutions/1284. Minimum Number of Flips to Convert Binary Matrix to Zero Matrix/1284.py | {
"start": 0,
"end": 1084
} | class ____:
def minFlips(self, mat: list[list[int]]) -> int:
m = len(mat)
n = len(mat[0])
hash = self._getHash(mat, m, n)
if hash == 0:
return 0
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
step = 0
q = collections.deque([hash])
seen = {hash}
while q:
step += 1
for _ in range(len(q)):
curr = q.popleft()
for i in range(m):
for j in range(n):
next = curr ^ 1 << (i * n + j)
# Flie the four neighbors.
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
next ^= 1 << (x * n + y)
if next == 0:
return step
if next in seen:
continue
q.append(next)
seen.add(next)
return -1
def _getHash(self, mat: list[list[int]], m: int, n: int) -> int:
hash = 0
for i in range(m):
for j in range(n):
if mat[i][j]:
hash |= 1 << (i * n + j)
return hash
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-sheets/unit_tests/integration/test_source.py | {
"start": 1315,
"end": 5074
} | class ____(GoogleSheetsBaseTest):
@HttpMocker()
def test_given_spreadsheet_when_check_then_status_is_succeeded(self, http_mocker: HttpMocker) -> None:
TestSourceCheck.get_spreadsheet_info_and_sheets(http_mocker, "check_succeeded_meta")
TestSourceCheck.get_sheet_first_row(http_mocker, "check_succeeded_range")
output = self._check(self._config, expecting_exception=False)
expected_message = AirbyteMessage(type=Type.LOG, log=AirbyteLogMessage(level=Level.INFO, message="Check succeeded"))
assert output.logs[-1] == expected_message
@HttpMocker()
def test_check_expected_to_read_data_from_1_sheet(self, http_mocker: HttpMocker) -> None:
TestSourceCheck.get_spreadsheet_info_and_sheets(http_mocker, "check_succeeded_meta", 200)
TestSourceCheck.get_sheet_first_row(http_mocker, "check_wrong_range", 200)
error_message = (
f"Unable to read the schema of sheet. Error: Unexpected return result: Sheet was expected to contain data on exactly 1 sheet."
)
output = self._check(self._config, expecting_exception=True)
trace_message = AirbyteTraceMessage(
type=TraceType.ERROR,
emitted_at=ANY,
error=AirbyteErrorTraceMessage(
message=error_message,
internal_message=ANY,
failure_type=FailureType.config_error,
stack_trace=ANY,
),
)
expected_message = AirbyteMessage(type=Type.TRACE, trace=trace_message)
assert output.errors[-1] == expected_message
@HttpMocker()
def test_check_duplicated_headers(self, http_mocker: HttpMocker) -> None:
# With headers, we refer to properties that will be used for schema
TestSourceCheck.get_spreadsheet_info_and_sheets(http_mocker, "check_succeeded_meta", 200)
TestSourceCheck.get_sheet_first_row(http_mocker, "check_duplicate_headers", 200)
error_message = f"The following duplicate headers were found in the sheet. Please fix them to continue: ['header1']"
output = self._check(self._config, expecting_exception=True)
trace_message = AirbyteTraceMessage(
type=TraceType.ERROR,
emitted_at=ANY,
error=AirbyteErrorTraceMessage(
message=error_message,
internal_message=ANY,
failure_type=FailureType.system_error,
stack_trace=ANY,
),
)
assert output.is_in_logs("Duplicate headers")
@HttpMocker()
def test_given_grid_sheet_type_with_at_least_one_row_when_discover_then_return_stream(self, http_mocker: HttpMocker) -> None:
TestSourceCheck.get_spreadsheet_info_and_sheets(http_mocker, "only_headers_meta", 200)
TestSourceCheck.get_sheet_first_row(http_mocker, "only_headers_range", 200)
expected_schema = {
"$schema": "https://json-schema.org/draft-07/schema#",
"additionalProperties": True,
"properties": {"header1": {"type": ["null", "string"]}, "header2": {"type": ["null", "string"]}},
"type": "object",
}
expected_catalog = AirbyteCatalog(
streams=[
AirbyteStream(
name="a_stream_name",
json_schema=expected_schema,
supported_sync_modes=[SyncMode.full_refresh],
is_resumable=False,
is_file_based=False,
)
]
)
expected_message = AirbyteMessage(type=Type.CATALOG, catalog=expected_catalog)
output = self._discover(self._config, expecting_exception=False)
assert output.catalog == expected_message
| TestSourceCheck |
python | apache__airflow | providers/sftp/src/airflow/providers/sftp/hooks/sftp.py | {
"start": 27382,
"end": 34199
} | class ____(BaseHook):
"""
Interact with an SFTP server via asyncssh package.
:param sftp_conn_id: SFTP connection ID to be used for connecting to SFTP server
:param host: hostname of the SFTP server
:param port: port of the SFTP server
:param username: username used when authenticating to the SFTP server
:param password: password used when authenticating to the SFTP server.
Can be left blank if using a key file
:param known_hosts: path to the known_hosts file on the local file system. Defaults to ``~/.ssh/known_hosts``.
:param key_file: path to the client key file used for authentication to SFTP server
:param passphrase: passphrase used with the key_file for authentication to SFTP server
"""
conn_name_attr = "ssh_conn_id"
default_conn_name = "sftp_default"
conn_type = "sftp"
hook_name = "SFTP"
default_known_hosts = "~/.ssh/known_hosts"
def __init__( # nosec: B107
self,
sftp_conn_id: str = default_conn_name,
host: str = "",
port: int = 22,
username: str = "",
password: str = "",
known_hosts: str = default_known_hosts,
key_file: str = "",
passphrase: str = "",
private_key: str = "",
) -> None:
self.sftp_conn_id = sftp_conn_id
self.host = host
self.port = port
self.username = username
self.password = password
self.known_hosts: bytes | str = os.path.expanduser(known_hosts)
self.key_file = key_file
self.passphrase = passphrase
self.private_key = private_key
def _parse_extras(self, conn: Connection) -> None:
"""Parse extra fields from the connection into instance fields."""
extra_options = conn.extra_dejson
if "key_file" in extra_options and self.key_file == "":
self.key_file = extra_options["key_file"]
if "known_hosts" in extra_options and self.known_hosts != self.default_known_hosts:
self.known_hosts = extra_options["known_hosts"]
if ("passphrase" or "private_key_passphrase") in extra_options:
self.passphrase = extra_options["passphrase"]
if "private_key" in extra_options:
self.private_key = extra_options["private_key"]
host_key = extra_options.get("host_key")
nhkc_raw = extra_options.get("no_host_key_check")
no_host_key_check = True if nhkc_raw is None else (str(nhkc_raw).lower() == "true")
if host_key is not None and no_host_key_check:
raise ValueError("Host key check was skipped, but `host_key` value was given")
if no_host_key_check:
self.log.warning("No Host Key Verification. This won't protect against Man-In-The-Middle attacks")
self.known_hosts = "none"
elif host_key is not None:
self.known_hosts = f"{conn.host} {host_key}".encode()
async def _get_conn(self) -> asyncssh.SSHClientConnection:
"""
Asynchronously connect to the SFTP server as an SSH client.
The following parameters are provided either in the extra json object in
the SFTP connection definition
- key_file
- known_hosts
- passphrase
"""
conn = await sync_to_async(self.get_connection)(self.sftp_conn_id)
if conn.extra is not None:
self._parse_extras(conn) # type: ignore[arg-type]
conn_config: dict[str, Any] = {
"host": conn.host,
"port": conn.port,
"username": conn.login,
"password": conn.password,
}
if self.key_file:
conn_config.update(client_keys=self.key_file)
if self.known_hosts:
if self.known_hosts.lower() == "none":
conn_config.update(known_hosts=None)
else:
conn_config.update(known_hosts=self.known_hosts)
if self.private_key:
_private_key = asyncssh.import_private_key(self.private_key, self.passphrase)
conn_config["client_keys"] = [_private_key]
if self.passphrase:
conn_config.update(passphrase=self.passphrase)
ssh_client_conn = await asyncssh.connect(**conn_config)
return ssh_client_conn
async def list_directory(self, path: str = "") -> list[str] | None: # type: ignore[return]
"""Return a list of files on the SFTP server at the provided path."""
async with await self._get_conn() as ssh_conn:
sftp_client = await ssh_conn.start_sftp_client()
try:
files = await sftp_client.listdir(path)
return sorted(files)
except asyncssh.SFTPNoSuchFile:
return None
async def read_directory(self, path: str = "") -> Sequence[asyncssh.sftp.SFTPName] | None: # type: ignore[return]
"""Return a list of files along with their attributes on the SFTP server at the provided path."""
async with await self._get_conn() as ssh_conn:
sftp_client = await ssh_conn.start_sftp_client()
try:
return await sftp_client.readdir(path)
except asyncssh.SFTPNoSuchFile:
return None
async def get_files_and_attrs_by_pattern(
self, path: str = "", fnmatch_pattern: str = ""
) -> Sequence[asyncssh.sftp.SFTPName]:
"""
Get the files along with their attributes matching the pattern (e.g. ``*.pdf``) at the provided path.
if one exists. Otherwise, raises an AirflowException to be handled upstream for deferring
"""
files_list = await self.read_directory(path)
if files_list is None:
raise FileNotFoundError(f"No files at path {path!r} found...")
matched_files = [file for file in files_list if fnmatch(str(file.filename), fnmatch_pattern)]
return matched_files
async def get_mod_time(self, path: str) -> str: # type: ignore[return]
"""
Make SFTP async connection.
Looks for last modified time in the specific file path and returns last modification time for
the file path.
:param path: full path to the remote file
"""
async with await self._get_conn() as ssh_conn:
try:
sftp_client = await ssh_conn.start_sftp_client()
ftp_mdtm = await sftp_client.stat(path)
modified_time = ftp_mdtm.mtime
mod_time = datetime.datetime.fromtimestamp(modified_time).strftime("%Y%m%d%H%M%S") # type: ignore[arg-type]
self.log.info("Found File %s last modified: %s", str(path), str(mod_time))
return mod_time
except asyncssh.SFTPNoSuchFile:
raise AirflowException("No files matching")
| SFTPHookAsync |
python | google__pytype | pytype/rewrite/abstract/classes.py | {
"start": 414,
"end": 506
} | class ____(Protocol):
members: dict[str, base.BaseValue]
@dataclasses.dataclass
| _HasMembers |
python | getsentry__sentry | src/sentry/interfaces/security.py | {
"start": 1961,
"end": 2563
} | class ____(SecurityReport):
"""
A HTTP Public Key Pinning pin validation failure report.
See also: https://tools.ietf.org/html/rfc7469#section-3
>>> {
>>> "date-time": "2014-04-06T13:00:50Z",
>>> "hostname": "www.example.com",
>>> "port": 443,
>>> "effective-expiration-date": "2014-05-01T12:40:50Z",
>>> "include-subdomains": False,
>>> "served-certificate-chain": [],
>>> "validated-certificate-chain": [],
>>> "known-pins": [],
>>> }
"""
score = 1300
display_score = 1300
title = "HPKP Report"
| Hpkp |
python | pytorch__pytorch | test/onnx/exporter/test_verification.py | {
"start": 4180,
"end": 5456
} | class ____(common_utils.TestCase):
def test_verify_onnx_program(self):
class Model(torch.nn.Module):
def forward(self, a, b):
c = a + b
return c - 1, c
model = Model()
args = (torch.tensor([1.0]), torch.tensor([2.0]))
onnx_program = torch.onnx.export(model, args, dynamo=True, verbose=False)
assert onnx_program is not None
verification_infos = _verification.verify_onnx_program(
onnx_program, args, compare_intermediates=False
)
self.assertEqual(len(verification_infos), 2)
def test_verify_onnx_program_with_compare_intermediates_true(self):
class Model(torch.nn.Module):
def forward(self, a, b):
c = a + b
return c - 1, c
model = Model()
args = (torch.tensor([1.0]), torch.tensor([2.0]))
onnx_program = torch.onnx.export(model, args, dynamo=True, verbose=False)
assert onnx_program is not None
verification_infos = _verification.verify_onnx_program(
onnx_program, args, compare_intermediates=True
)
self.assertEqual(len(verification_infos), 3)
if __name__ == "__main__":
common_utils.run_tests()
| VerificationFunctionsTest |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 3708,
"end": 3851
} | class ____(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
| CloseReasonHandler |
python | pyca__cryptography | tests/hazmat/primitives/test_serialization.py | {
"start": 55458,
"end": 58545
} | class ____:
def test_load_der_private_key(self, backend):
data = load_vectors_from_file(
os.path.join("asymmetric", "X448", "x448-pkcs8-enc.der"),
lambda derfile: derfile.read(),
mode="rb",
)
unencrypted = load_vectors_from_file(
os.path.join("asymmetric", "X448", "x448-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key = load_der_private_key(data, b"password", backend)
assert (
key.private_bytes(
Encoding.DER, PrivateFormat.PKCS8, NoEncryption()
)
== unencrypted
)
def test_load_pem_private_key(self, backend):
data = load_vectors_from_file(
os.path.join("asymmetric", "X448", "x448-pkcs8-enc.pem"),
lambda pemfile: pemfile.read(),
mode="rb",
)
unencrypted = load_vectors_from_file(
os.path.join("asymmetric", "X448", "x448-pkcs8.pem"),
lambda pemfile: pemfile.read(),
mode="rb",
)
key = load_pem_private_key(data, b"password", backend)
assert (
key.private_bytes(
Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()
)
== unencrypted
)
@pytest.mark.parametrize(
("key_path", "encoding", "loader"),
[
(["X448", "x448-pub.pem"], Encoding.PEM, load_pem_public_key),
(["X448", "x448-pub.der"], Encoding.DER, load_der_public_key),
],
)
def test_load_public_key(self, key_path, encoding, loader, backend):
data = load_vectors_from_file(
os.path.join("asymmetric", *key_path),
lambda pemfile: pemfile.read(),
mode="rb",
)
public_key = loader(data, backend)
assert (
public_key.public_bytes(
encoding, PublicFormat.SubjectPublicKeyInfo
)
== data
)
def test_openssl_serialization_unsupported(self, backend):
key = x448.X448PrivateKey.generate()
with pytest.raises(ValueError):
key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
NoEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
Encoding.DER,
PrivateFormat.TraditionalOpenSSL,
NoEncryption(),
)
def test_openssh_serialization_unsupported(self, backend):
key = x448.X448PrivateKey.generate()
with pytest.raises(ValueError):
key.public_key().public_bytes(
Encoding.OpenSSH, PublicFormat.OpenSSH
)
with pytest.raises(ValueError):
key.private_bytes(
Encoding.PEM, PrivateFormat.OpenSSH, NoEncryption()
)
@pytest.mark.supported(
only_if=lambda backend: backend.x25519_supported(),
skip_message="Requires OpenSSL with X25519 support",
)
| TestX448Serialization |
python | bokeh__bokeh | src/bokeh/resources.py | {
"start": 19016,
"end": 23412
} | class ____:
""" Internal class used to parse kwargs for server URL, app_path, and session_id."""
_url: str
_session_id: ID | None
def __init__(self, *, url: str = DEFAULT_SERVER_HTTP_URL, session_id: ID | None = None) -> None:
self._url = url
if self._url == "default":
self._url = DEFAULT_SERVER_HTTP_URL
if self._url.startswith("ws"):
raise ValueError("url should be the http or https URL for the server, not the websocket URL")
self._url = self._url.rstrip("/")
# we lazy-generate the session_id so we can generate it server-side when appropriate
self._session_id = session_id
# Properties --------------------------------------------------------------
@property
def url(self) -> str:
return self._url
@property
def session_id(self) -> ID:
""" Session ID derived from the kwargs provided."""
if self._session_id is None:
self._session_id = generate_session_id()
return self._session_id
@property
def session_id_allowing_none(self) -> ID | None:
""" Session ID provided in kwargs, keeping it None if it hasn't been generated yet.
The purpose of this is to preserve ``None`` as long as possible... in some cases
we may never generate the session ID because we generate it on the server.
"""
return self._session_id
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
_DEV_PAT = re.compile(r"^(\d)+\.(\d)+\.(\d)+(\.dev|rc)")
def _cdn_base_url() -> str:
return "https://cdn.bokeh.org"
def _get_cdn_urls(version: str | None = None, minified: bool = True) -> Urls:
if version is None:
docs_cdn = settings.docs_cdn()
version = docs_cdn if docs_cdn else __version__.split("+")[0]
base_url = _cdn_base_url()
container = "bokeh/dev" if _DEV_PAT.match(version) else "bokeh/release"
def mk_filename(comp: str, kind: Kind) -> str:
return f"{comp}-{version}{'.min' if minified else ''}.{kind}"
def mk_url(comp: str, kind: Kind) -> str:
return f"{base_url}/{container}/" + mk_filename(comp, kind)
result = Urls(urls=lambda components, kind: [mk_url(component, kind) for component in components])
if len(__version__.split("+")) > 1:
result.messages.append(RuntimeMessage(
type="warn",
text=(
f"Requesting CDN BokehJS version '{version}' from local development version '{__version__}'. "
"This configuration is unsupported and may not work!"
),
))
if is_full_release(version): # TODO: TypeGuard?
assert version is not None
sri_hashes = get_sri_hashes_for_version(version)
result.hashes = lambda components, kind: {
mk_url(component, kind): sri_hashes[mk_filename(component, kind)] for component in components
}
return result
def _get_server_urls(
root_url: str = DEFAULT_SERVER_HTTP_URL,
minified: bool = True,
path_versioner: PathVersioner | None = None,
) -> Urls:
_minified = ".min" if minified else ""
def mk_url(comp: str, kind: Kind) -> str:
path = f"{kind}/{comp}{_minified}.{kind}"
if path_versioner is not None:
path = path_versioner(path)
return f"{root_url}static/{path}"
return Urls(urls=lambda components, kind: [mk_url(component, kind) for component in components])
def _compute_single_hash(path: Path) -> str:
assert path.suffix == ".js"
from subprocess import PIPE, Popen
digest = f"openssl dgst -sha384 -binary {path}".split()
p1 = Popen(digest, stdout=PIPE)
b64 = "openssl base64 -A".split()
p2 = Popen(b64, stdin=p1.stdout, stdout=PIPE)
out, _ = p2.communicate()
return out.decode("utf-8").strip()
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
ResourcesLike: TypeAlias = Resources | ResourcesMode
CDN = Resources(mode="cdn")
INLINE = Resources(mode="inline")
__all__ = (
"CDN",
"INLINE",
"Resources",
"get_all_sri_versions",
"get_sri_hashes_for_version",
"verify_sri_hashes",
)
| SessionCoordinates |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/77_class__class__nonlocals.py | {
"start": 0,
"end": 120
} | class ____:
class Inner:
nonlocal __class__
__class__ = 42
def f():
__class__
| Outer |
python | tornadoweb__tornado | maint/test/mypy/bad.py | {
"start": 41,
"end": 146
} | class ____(RequestHandler):
def get(self) -> str: # Deliberate type error
return "foo"
| MyHandler |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/repeat_test.py | {
"start": 11370,
"end": 12736
} | class ____(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
dataset_range=[41],
repetitions=[1, 27],
reshuffle_each_iteration=[True, False],
symbolic_checkpoint=[True, False])))
def testRepeat(
self,
verify_fn: Callable[..., None],
dataset_range: int,
repetitions: int,
reshuffle_each_iteration: bool,
symbolic_checkpoint: bool):
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.repeat(repetitions)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
return dataset.with_options(options)
verify_fn(
self,
_build_dataset,
num_outputs=dataset_range * repetitions,
assert_items_equal=reshuffle_each_iteration)
if __name__ == "__main__":
test.main()
| RepeatGlobalShuffleCheckpointTest |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 43247,
"end": 43301
} | class ____(Stmt):
__slots__ = ("test", "msg")
| Assert |
python | graphql-python__graphene | graphene/types/enum.py | {
"start": 391,
"end": 485
} | class ____(BaseOptions):
enum = None # type: Enum
deprecation_reason = None
| EnumOptions |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 10478,
"end": 10942
} | class ____(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Velocity'
aliases = ['html+velocity']
alias_filenames = ['*.html', '*.fhtml']
mimetypes = ['text/html+velocity']
def __init__(self, **options):
super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
**options)
| VelocityHtmlLexer |
python | pytorch__pytorch | torch/nn/attention/flex_attention.py | {
"start": 9603,
"end": 9856
} | class ____(NamedTuple):
"""Request which auxiliary outputs to compute from flex_attention.
Each field is a boolean indicating whether that auxiliary output should be computed.
"""
lse: bool = False
max_scores: bool = False
| AuxRequest |
python | tensorflow__tensorflow | tensorflow/python/framework/errors_impl.py | {
"start": 1972,
"end": 8574
} | class ____(Exception):
"""The base class for TensorFlow exceptions.
Usually, TensorFlow will raise a more specific subclass of `OpError` from the
`tf.errors` module.
"""
def __init__(self, node_def, op, message, error_code, *args):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None. During
eager execution, this field is always `None`.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
*args: If not empty, it should contain a dictionary describing details
about the error. This argument is inspired by Abseil payloads:
https://github.com/abseil/abseil-cpp/blob/master/absl/status/status.h
"""
super(OpError, self).__init__()
self._node_def = node_def
self._op = op
self._message = message
self._error_code = error_code
if args:
self._experimental_payloads = args[0]
else:
self._experimental_payloads = {}
def __reduce__(self):
# Allow the subclasses to accept less arguments in their __init__.
init_argspec = tf_inspect.getargspec(self.__class__.__init__)
args = tuple(getattr(self, arg) for arg in init_argspec.args[1:])
return self.__class__, args
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
`tf.Operation`
object. In that case, this will return `None`, and you should
instead use the `tf.errors.OpError.node_def` to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
@property
def experimental_payloads(self):
"""A dictionary describing the details of the error."""
return self._experimental_payloads
def __str__(self):
if self._op is not None:
output = [
"%s\n\nOriginal stack trace for %r:\n" % (
self.message,
self._op.name,
)
]
curr_traceback_list = traceback.format_list(self._op.traceback or [])
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n" %
(original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(original_op.traceback or [])
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend([
"[elided %d identical lines from previous traceback]\n" %
(elide_count - 1,), last_elided_line
])
is_eliding = False
output.extend(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
return "".join(output)
else:
return self.message
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
"INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
"DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
"PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
"RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
"FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
@tf_export("errors.CancelledError")
| OpError |
python | modin-project__modin | modin/core/dataframe/pandas/dataframe/utils.py | {
"start": 3505,
"end": 19725
} | class ____(ShuffleFunctions):
"""
Perform the sampling, quantiles picking, and the splitting stages for the range-partitioning building.
Parameters
----------
modin_frame : PandasDataframe
The frame to build the range-partitioning for.
columns : str, list of strings or None
The column/columns to use as a key. Can't be specified along with `level`.
ascending : bool
Whether the ranges should be in ascending or descending order.
ideal_num_new_partitions : int
The ideal number of new partitions.
level : list of strings or ints, or None
Index level(s) to use as a key. Can't be specified along with `columns`.
closed_on_right : bool, default: False
Whether to include the right limit in range-partitioning.
True: bins[i - 1] < x <= bins[i]
False: bins[i - 1] <= x < bins[i]
**kwargs : dict
Additional keyword arguments.
"""
def __init__(
self,
modin_frame: "PandasDataframe",
columns: Optional[Union[str, list]],
ascending: Union[list, bool],
ideal_num_new_partitions: int,
level: Optional[list[Union[str, int]]] = None,
closed_on_right: bool = False,
**kwargs: dict,
):
self.frame_len = len(modin_frame)
self.ideal_num_new_partitions = ideal_num_new_partitions
self.columns = columns if is_list_like(columns) else [columns]
self.ascending = ascending
self.kwargs = kwargs.copy()
self.level = level
self.columns_info = None
self.closed_on_right = closed_on_right
def sample_fn(self, partition: pandas.DataFrame) -> pandas.DataFrame:
if self.level is not None:
partition = self._index_to_df_zero_copy(partition, self.level)
else:
partition = partition[self.columns]
return self.pick_samples_for_quantiles(
partition, self.ideal_num_new_partitions, self.frame_len
)
def pivot_fn(self, samples: "list[pandas.DataFrame]") -> int:
key = self.kwargs.get("key", None)
samples = pandas.concat(samples, axis=0, copy=False)
columns_info: "list[ColumnInfo]" = []
number_of_groups = 1
cols = []
for i, col in enumerate(samples.columns):
num_pivots = int(self.ideal_num_new_partitions / number_of_groups)
if num_pivots < 2 and len(columns_info):
break
column_val = samples[col]
cols.append(col)
is_numeric = is_numeric_dtype(column_val.dtype)
# When we are not sorting numbers, we need our quantiles to not do arithmetic on the values
method = "linear" if is_numeric else "inverted_cdf"
pivots = self.pick_pivots_from_samples_for_sort(
column_val, num_pivots, method, key
)
columns_info.append(
ColumnInfo(
self.level[i] if self.level is not None else col,
pivots,
is_numeric,
)
)
number_of_groups *= len(pivots) + 1
self.columns_info = columns_info
return number_of_groups
def split_fn(
self,
partition: pandas.DataFrame,
) -> "tuple[pandas.DataFrame, ...]":
ErrorMessage.catch_bugs_and_request_email(
failure_condition=self.columns_info is None,
extra_log="The 'split_fn' doesn't have proper metadata, the probable reason is that it was called before 'pivot_fn'",
)
return self.split_partitions_using_pivots_for_sort(
partition,
self.columns_info,
self.ascending,
keys_are_index_levels=self.level is not None,
closed_on_right=self.closed_on_right,
**self.kwargs,
)
@staticmethod
def _find_quantiles(
df: Union[pandas.DataFrame, pandas.Series], quantiles: list, method: str
) -> np.ndarray:
"""
Find quantiles of a given dataframe using the specified method.
We use this method to provide backwards compatibility with NumPy versions < 1.23 (e.g. when
the user is using Modin in compat mode). This is basically a wrapper around `np.quantile` that
ensures we provide the correct `method` argument - i.e. if we are dealing with objects (which
may or may not support algebra), we do not want to use a method to find quantiles that will
involve algebra operations (e.g. mean) between the objects, since that may fail.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The data to pick quantiles from.
quantiles : list[float]
The quantiles to compute.
method : str
The method to use. `linear` if dealing with numeric types, otherwise `inverted_cdf`.
Returns
-------
np.ndarray
A NumPy array with the quantiles of the data.
"""
if method == "linear":
# This is the default method for finding quantiles, so it does not need to be specified,
# which keeps backwards compatibility with older versions of NumPy that do not have a
# `method` keyword argument in np.quantile.
return np.unique(np.quantile(df, quantiles))
else:
try:
return np.unique(np.quantile(df, quantiles, method=method))
except Exception:
# In this case, we're dealing with an array of objects, but the current version of
# NumPy does not have a `method` kwarg. We need to use the older kwarg, `interpolation`
# instead.
return np.unique(np.quantile(df, quantiles, interpolation="lower"))
@staticmethod
def pick_samples_for_quantiles(
df: pandas.DataFrame,
num_partitions: int,
length: int,
) -> pandas.DataFrame:
"""
Pick samples over the given partition.
This function picks samples from the given partition using the TeraSort algorithm - each
value is sampled with probability 1 / m * ln(n * t) where m = total_length / num_partitions,
t = num_partitions, and n = total_length.
Parameters
----------
df : pandas.Dataframe
The masked dataframe to pick samples from.
num_partitions : int
The number of partitions.
length : int
The total length.
Returns
-------
pandas.DataFrame:
The samples for the partition.
Notes
-----
This sampling algorithm is inspired by TeraSort. You can find more information about TeraSort
and the sampling algorithm at https://www.cse.cuhk.edu.hk/~taoyf/paper/sigmod13-mr.pdf.
"""
m = length / num_partitions
probability = (1 / m) * np.log(num_partitions * length)
return df.sample(frac=probability)
def pick_pivots_from_samples_for_sort(
self,
samples: pandas.Series,
ideal_num_new_partitions: int,
method: str = "linear",
key: Optional[Callable] = None,
) -> np.ndarray:
"""
Determine quantiles from the given samples.
This function takes as input the quantiles calculated over all partitions from
`sample_func` defined above, and determines a final NPartitions.get() quantiles
to use to roughly sort the entire dataframe. It does so by collating all the samples
and computing NPartitions.get() quantiles for the overall set.
Parameters
----------
samples : pandas.Series
The samples computed by ``get_partition_quantiles_for_sort``.
ideal_num_new_partitions : int
The ideal number of new partitions.
method : str, default: linear
The method to use when picking quantiles.
key : Callable, default: None
The key to use on the samples when picking pivots.
Returns
-------
np.ndarray
A list of overall quantiles.
"""
samples = samples.to_numpy()
# We don't call `np.unique` on the samples, since if a quantile shows up in multiple
# partition's samples, this is probably an indicator of skew in the dataset, and we
# want our final partitions to take this into account.
if key is not None:
samples = key(samples)
# We don't want to pick very many quantiles if we have a very small dataframe.
num_quantiles = ideal_num_new_partitions
quantiles = [i / num_quantiles for i in range(1, num_quantiles)]
# If we only desire 1 partition, we need to ensure that we're not trying to find quantiles
# from an empty list of pivots.
if len(quantiles) > 0:
return self._find_quantiles(samples, quantiles, method)
return np.array([])
@staticmethod
def split_partitions_using_pivots_for_sort(
df: pandas.DataFrame,
columns_info: "list[ColumnInfo]",
ascending: bool,
keys_are_index_levels: bool = False,
closed_on_right: bool = False,
**kwargs: dict,
) -> "tuple[pandas.DataFrame, ...]":
"""
Split the given dataframe into the partitions specified by `pivots` in `columns_info`.
This function takes as input a row-axis partition, as well as the quantiles determined
by the `pivot_func` defined above. It then splits the input dataframe into NPartitions.get()
dataframes, with the elements in the i-th split belonging to the i-th partition, as determined
by the quantiles we're using.
Parameters
----------
df : pandas.Dataframe
The partition to split.
columns_info : list of ColumnInfo
Information regarding keys and pivots for range partitioning.
ascending : bool
The ascending flag.
keys_are_index_levels : bool, default: False
Whether `columns_info` describes index levels or actual columns from `df`.
closed_on_right : bool, default: False
Whether to include the right limit in range-partitioning.
True: bins[i - 1] < x <= bins[i]
False: bins[i - 1] <= x < bins[i]
**kwargs : dict
Additional keyword arguments.
Returns
-------
tuple[pandas.DataFrame]
A tuple of the splits from this partition.
"""
if len(columns_info) == 0:
# We can return the dataframe with zero changes if there were no pivots passed
return (df,)
key_data = (
ShuffleSortFunctions._index_to_df_zero_copy(
df, [col_info.name for col_info in columns_info]
)
if keys_are_index_levels
else df[[col_info.name for col_info in columns_info]]
)
na_index = key_data.isna().squeeze(axis=1)
if na_index.ndim == 2:
na_index = na_index.any(axis=1)
na_rows = df[na_index]
non_na_rows = df[~na_index]
def get_group(grp, key, df):
"""Get a group with the `key` from the `grp`, if it doesn't exist return an empty slice of `df`."""
try:
return grp.get_group(key)
except KeyError:
return pandas.DataFrame(index=df.index[:0], columns=df.columns).astype(
df.dtypes
)
groupby_codes = []
group_keys = []
for col_info in columns_info:
pivots = col_info.pivots
if len(pivots) == 0:
continue
# If `ascending=False` and we are dealing with a numeric dtype, we can pass in a reversed list
# of pivots, and `np.digitize` will work correctly. For object dtypes, we use `np.searchsorted`
# which breaks when we reverse the pivots.
if not ascending and col_info.is_numeric:
# `key` is already applied to `pivots` in the `pick_pivots_from_samples_for_sort` function.
pivots = pivots[::-1]
group_keys.append(range(len(pivots) + 1))
key = kwargs.pop("key", None)
cols_to_digitize = (
non_na_rows.index.get_level_values(col_info.name)
if keys_are_index_levels
else non_na_rows[col_info.name]
)
if key is not None:
cols_to_digitize = key(cols_to_digitize)
if cols_to_digitize.ndim == 2:
cols_to_digitize = cols_to_digitize.squeeze()
if col_info.is_numeric:
groupby_col = np.digitize(
cols_to_digitize, pivots, right=closed_on_right
)
# `np.digitize` returns results based off of the sort order of the pivots it is passed.
# When we only have one unique value in our pivots, `np.digitize` assumes that the pivots
# are sorted in ascending order, and gives us results based off of that assumption - so if
# we actually want to sort in descending order, we need to swap the new indices.
if not ascending and len(np.unique(pivots)) == 1:
groupby_col = len(pivots) - groupby_col
else:
groupby_col = np.searchsorted(
pivots,
cols_to_digitize,
side="left" if closed_on_right else "right",
)
# Since np.searchsorted requires the pivots to be in ascending order, if we want to sort
# in descending order, we need to swap the new indices.
if not ascending:
groupby_col = len(pivots) - groupby_col
groupby_codes.append(groupby_col)
if len(group_keys) == 0:
# We can return the dataframe with zero changes if there were no pivots passed
return (df,)
elif len(group_keys) == 1:
group_keys = group_keys[0]
else:
group_keys = pandas.MultiIndex.from_product(group_keys)
if len(non_na_rows) == 1:
groups = [
# taking an empty slice for an index's metadata
(
pandas.DataFrame(index=df.index[:0], columns=df.columns).astype(
df.dtypes
)
if key != groupby_codes[0]
else non_na_rows
)
for key in group_keys
]
else:
grouped = non_na_rows.groupby(groupby_codes)
groups = [get_group(grouped, key, df) for key in group_keys]
index_to_insert_na_vals = (
-1 if kwargs.get("na_position", "last") == "last" else 0
)
groups[index_to_insert_na_vals] = pandas.concat(
[groups[index_to_insert_na_vals], na_rows]
).astype(df.dtypes)
return tuple(groups)
@staticmethod
def _index_to_df_zero_copy(
df: pandas.DataFrame, levels: list[Union[str, int]]
) -> pandas.DataFrame:
"""
Convert index `level` of `df` to a ``pandas.DataFrame``.
Parameters
----------
df : pandas.DataFrame
levels : list of labels or ints
Index level to convert to a dataframe.
Returns
-------
pandas.DataFrame
The columns in the resulting dataframe use the same data arrays as the index levels
in the original `df`, so no copies.
"""
# calling 'df.index.to_frame()' creates a copy of the index, so doing the conversion manually
# to avoid the copy
data = {
(
df.index.names[lvl] if isinstance(lvl, int) else lvl
): df.index.get_level_values(lvl)
for lvl in levels
}
index_data = pandas.DataFrame(data, index=df.index, copy=False)
return index_data
@_inherit_docstrings(ShuffleSortFunctions)
| ShuffleSortFunctions |
python | django__django | tests/admin_filters/tests.py | {
"start": 9411,
"end": 9493
} | class ____(ModelAdmin):
list_filter = ["tags__tag"]
| BookmarkAdminGenericRelation |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_radar02.py | {
"start": 315,
"end": 1372
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_radar02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "radar", "subtype": "with_markers"})
chart.axis_ids = [48543616, 48545152]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/count-complete-substrings.py | {
"start": 102,
"end": 1304
} | class ____(object):
def countCompleteSubstrings(self, word, k):
"""
:type word: str
:type k: int
:rtype: int
"""
result = valid = 0
cnt = [0]*26
for c in xrange(1, len(set(word))+1):
left = 0
for right in xrange(len(word)):
cnt[ord(word[right])-ord('a')] += 1
curr = cnt[ord(word[right])-ord('a')]
valid += 1 if curr == k else -1 if curr == k+1 else 0
if right-left+1 == c*k+1:
curr = cnt[ord(word[left])-ord('a')]
valid -= 1 if curr == k else -1 if curr == k+1 else 0
cnt[ord(word[left])-ord('a')] -= 1
left += 1
if valid == c:
result += 1
if right+1 == len(word) or abs(ord(word[right+1])-ord(word[right])) > 2:
while left < right+1:
curr = cnt[ord(word[left])-ord('a')]
valid -= 1 if curr == k else -1 if curr == k+1 else 0
cnt[ord(word[left])-ord('a')] -= 1
left += 1
return result
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/named_types.py | {
"start": 17663,
"end": 17747
} | class ____(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
| DropEnumType |
python | sympy__sympy | sympy/polys/domains/groundtypes.py | {
"start": 580,
"end": 2102
} | class ____:
def __init__(self, obj):
pass
if GROUND_TYPES == 'gmpy':
from gmpy2 import (
mpz as GMPYInteger,
mpq as GMPYRational,
numer as gmpy_numer,
denom as gmpy_denom,
gcdext as gmpy_gcdex,
gcd as gmpy_gcd,
lcm as gmpy_lcm,
qdiv as gmpy_qdiv,
)
gcdex = gmpy_gcdex
gcd = gmpy_gcd
lcm = gmpy_lcm
elif GROUND_TYPES == 'flint':
from flint import fmpz as _fmpz
GMPYInteger = _GMPYInteger
GMPYRational = _GMPYRational
gmpy_numer = None
gmpy_denom = None
gmpy_gcdex = None
gmpy_gcd = None
gmpy_lcm = None
gmpy_qdiv = None
def gcd(a, b):
return a.gcd(b)
def gcdex(a, b):
x, y, g = python_gcdex(a, b)
return _fmpz(x), _fmpz(y), _fmpz(g)
def lcm(a, b):
return a.lcm(b)
else:
GMPYInteger = _GMPYInteger
GMPYRational = _GMPYRational
gmpy_numer = None
gmpy_denom = None
gmpy_gcdex = None
gmpy_gcd = None
gmpy_lcm = None
gmpy_qdiv = None
gcdex = python_gcdex
gcd = python_gcd
lcm = python_lcm
__all__ = [
'PythonInteger', 'PythonReal', 'PythonComplex',
'PythonRational',
'python_gcdex', 'python_gcd', 'python_lcm',
'SymPyReal', 'SymPyInteger', 'SymPyRational',
'GMPYInteger', 'GMPYRational', 'gmpy_numer',
'gmpy_denom', 'gmpy_gcdex', 'gmpy_gcd', 'gmpy_lcm',
'gmpy_qdiv',
'factorial', 'sqrt', 'is_square', 'sqrtrem',
'GMPYInteger', 'GMPYRational',
]
| _GMPYRational |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 51217,
"end": 51787
} | class ____(NamedTuple):
width: float
stretch: float
stretch_order: int
shrink: float
shrink_order: int
_GlueSpec._named = { # type: ignore[attr-defined]
'fil': _GlueSpec(0., 1., 1, 0., 0),
'fill': _GlueSpec(0., 1., 2, 0., 0),
'filll': _GlueSpec(0., 1., 3, 0., 0),
'neg_fil': _GlueSpec(0., 0., 0, 1., 1),
'neg_fill': _GlueSpec(0., 0., 0, 1., 2),
'neg_filll': _GlueSpec(0., 0., 0, 1., 3),
'empty': _GlueSpec(0., 0., 0, 0., 0),
'ss': _GlueSpec(0., 1., 1, -1., 1),
}
| _GlueSpec |
python | pytorch__pytorch | torch/fx/passes/operator_support.py | {
"start": 781,
"end": 1119
} | class ____(abc.ABC):
"""Interface for determining if a fx.Node is supported by a backend"""
@abc.abstractmethod
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
raise NotImplementedError
@compatibility(is_backward_compatible=False)
| OperatorSupportBase |
python | huggingface__transformers | src/transformers/models/time_series_transformer/configuration_time_series_transformer.py | {
"start": 836,
"end": 11695
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to
instantiate a Time Series Transformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Time Series
Transformer
[huggingface/time-series-transformer-tourism-monthly](https://huggingface.co/huggingface/time-series-transformer-tourism-monthly)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
typically dictated by the dataset and we recommend to set it appropriately.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If `None`, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
scaling (`string` or `bool`, *optional* defaults to `"mean"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates often dictated by the frequency of the data. Default is
`[1, 2, 3, 4, 5, 6, 7]` but we recommend to change it based on the dataset appropriately.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
Example:
```python
>>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
>>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction
>>> configuration = TimeSeriesTransformerConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = TimeSeriesTransformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "time_series_transformer"
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__(
self,
prediction_length: Optional[int] = None,
context_length: Optional[int] = None,
distribution_output: str = "student_t",
loss: str = "nll",
input_size: int = 1,
lags_sequence: list[int] = [1, 2, 3, 4, 5, 6, 7],
scaling: Optional[Union[str, bool]] = "mean",
num_dynamic_real_features: int = 0,
num_static_categorical_features: int = 0,
num_static_real_features: int = 0,
num_time_features: int = 0,
cardinality: Optional[list[int]] = None,
embedding_dimension: Optional[list[int]] = None,
encoder_ffn_dim: int = 32,
decoder_ffn_dim: int = 32,
encoder_attention_heads: int = 2,
decoder_attention_heads: int = 2,
encoder_layers: int = 2,
decoder_layers: int = 2,
is_encoder_decoder: bool = True,
activation_function: str = "gelu",
d_model: int = 64,
dropout: float = 0.1,
encoder_layerdrop: float = 0.1,
decoder_layerdrop: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
num_parallel_samples: int = 100,
init_std: float = 0.02,
use_cache=True,
**kwargs,
):
# time series specific configuration
self.prediction_length = prediction_length
self.context_length = context_length or prediction_length
self.distribution_output = distribution_output
self.loss = loss
self.input_size = input_size
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence
self.scaling = scaling
self.num_dynamic_real_features = num_dynamic_real_features
self.num_static_real_features = num_static_real_features
self.num_static_categorical_features = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(cardinality) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`"
)
self.cardinality = cardinality
else:
self.cardinality = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(embedding_dimension) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`"
)
self.embedding_dimension = embedding_dimension
else:
self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
self.num_parallel_samples = num_parallel_samples
# Transformer architecture configuration
self.feature_size = input_size * len(lags_sequence) + self._number_of_features
self.d_model = d_model
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
__all__ = ["TimeSeriesTransformerConfig"]
| TimeSeriesTransformerConfig |
python | catalyst-team__catalyst | catalyst/loggers/csv.py | {
"start": 203,
"end": 3701
} | class ____(ILogger):
"""CSV logger for the metrics storing under ``.csv`` file.
Args:
logdir: path to logdir for the logger
use_logdir_postfix: boolean flag to use extra ``logs`` prefix in the logdir
.. note::
This logger is used by default by ``dl.Runner`` and ``dl.SupervisedRunner``
in case of specified logdir during ``runner.train(..., logdir=/path/to/logdir)``.
Examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...,
loggers={"csv": dl.CSVLogger(logdir="./logdir/logs"}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"csv": dl.CSVLogger(logdir="./logdir/logs")
}
# ...
runner = CustomRunner().run()
"""
def __init__(self, logdir: str, use_logdir_postfix: bool = False):
"""Init."""
super().__init__(log_batch_metrics=False, log_epoch_metrics=True)
if use_logdir_postfix:
logdir = os.path.join(logdir, "csv_logger")
self.logdir = logdir
self.loggers = {}
os.makedirs(self.logdir, exist_ok=True)
@property
def logger(self):
"""Internal logger/experiment/etc. from the monitoring system."""
return self.loggers
def _make_header(self, metrics: Dict[str, float], loader_key: str):
log_line_header = "step,"
for metric in sorted(metrics.keys()):
log_line_header += metric + ","
log_line_header = log_line_header[:-1] + "\n" # replace last "," with new line
self.loggers[loader_key].write(log_line_header)
def _log_metrics(self, metrics: Dict[str, float], step: int, loader_key: str):
log_line_csv = f"{step},"
for metric in sorted(metrics.keys()):
log_line_csv += str(metrics[metric]) + ","
log_line_csv = log_line_csv[:-1] + "\n" # replace last "," with new line
self.loggers[loader_key].write(log_line_csv)
def log_hparams(self, hparams: Dict, runner: "IRunner" = None) -> None:
"""Logs hyperparameters to the logger."""
save_config(config=hparams, path=os.path.join(self.logdir, "hparams.json"))
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs epoch metrics to csv file."""
if scope == "epoch":
for loader_key, per_loader_metrics in metrics.items():
if loader_key not in self.loggers.keys():
self.loggers[loader_key] = open(
os.path.join(self.logdir, f"{loader_key}.csv"), "a+"
)
self._make_header(metrics=per_loader_metrics, loader_key=loader_key)
self._log_metrics(
metrics=per_loader_metrics,
step=runner.epoch_step,
loader_key=loader_key,
)
def flush_log(self) -> None:
"""Flushes the logger."""
for logger in self.loggers.values():
logger.flush()
def close_log(self) -> None:
"""Closes the logger."""
for logger in self.loggers.values():
logger.close()
__all__ = ["CSVLogger"]
| CSVLogger |
python | matplotlib__matplotlib | lib/matplotlib/backends/qt_editor/_formlayout.py | {
"start": 3157,
"end": 4957
} | class ____(QtWidgets.QHBoxLayout):
"""Color-specialized QLineEdit layout"""
def __init__(self, color, parent=None):
super().__init__()
assert isinstance(color, QtGui.QColor)
self.lineedit = QtWidgets.QLineEdit(
mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)
self.lineedit.editingFinished.connect(self.update_color)
self.addWidget(self.lineedit)
self.colorbtn = ColorButton(parent)
self.colorbtn.color = color
self.colorbtn.colorChanged.connect(self.update_text)
self.addWidget(self.colorbtn)
def update_color(self):
color = self.text()
qcolor = to_qcolor(color) # defaults to black if not qcolor.isValid()
self.colorbtn.color = qcolor
def update_text(self, color):
self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))
def text(self):
return self.lineedit.text()
def font_is_installed(font):
"""Check if font is installed"""
return [fam for fam in QtGui.QFontDatabase().families()
if str(fam) == font]
def tuple_to_qfont(tup):
"""
Create a QFont from tuple:
(family [string], size [int], italic [bool], bold [bool])
"""
if not (isinstance(tup, tuple) and len(tup) == 4
and font_is_installed(tup[0])
and isinstance(tup[1], Integral)
and isinstance(tup[2], bool)
and isinstance(tup[3], bool)):
return None
font = QtGui.QFont()
family, size, italic, bold = tup
font.setFamily(family)
font.setPointSize(size)
font.setItalic(italic)
font.setBold(bold)
return font
def qfont_to_tuple(font):
return (str(font.family()), int(font.pointSize()),
font.italic(), font.bold())
| ColorLayout |
python | facebookresearch__faiss | faiss/python/__init__.py | {
"start": 12214,
"end": 12494
} | class ____:
def __init__(self, timeout_in_seconds: float):
self.timeout = timeout_in_seconds
def __enter__(self):
TimeoutCallback.reset(self.timeout)
def __exit__(self, exc_type, exc_value, traceback):
PythonInterruptCallback.reset()
| TimeoutGuard |
python | jazzband__django-formtools | tests/wizard/wizardtests/models.py | {
"start": 31,
"end": 201
} | class ____(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'formtools'
def __str__(self):
return self.name
| Poet |
python | walkccc__LeetCode | solutions/2503. Maximum Number of Points From Grid Queries/2503.py | {
"start": 183,
"end": 1337
} | class ____:
def maxPoints(self, grid: list[list[int]], queries: list[int]) -> list[int]:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
ans = [0] * len(queries)
minHeap = [(grid[0][0], 0, 0)] # (grid[i][j], i, j)
seen = {(0, 0)}
accumulate = 0
for queryIndex, query in sorted([IndexedQuery(i, query)
for i, query in enumerate(queries)],
key=lambda x: x.query):
while minHeap:
val, i, j = heapq.heappop(minHeap)
if val >= query:
# The smallest neighbor is still larger than `query`, so no need to
# keep exploring. Re-push (i, j, grid[i][j]) back to the `minHeap`.
heapq.heappush(minHeap, (val, i, j))
break
accumulate += 1
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if (x, y) in seen:
continue
heapq.heappush(minHeap, (grid[x][y], x, y))
seen.add((x, y))
ans[queryIndex] = accumulate
return ans
| Solution |
python | pytorch__pytorch | test/dynamo/test_misc.py | {
"start": 433563,
"end": 435942
} | class ____(torch.testing._internal.common_utils.TestCase):
def test_autograd_function_with_matmul_folding_at_output(self):
"""
When tensor folding occurs during matmul operation returned tensor is a view.
This can cause issues when matmul is used inside a custom function
and such view is then returned as output. Then it cannot be modified inplace
and causes errors.
It can be especially problematic when after such function inplace allreduce
is performed. This test recreates this behaviour.
Issue is resolved when unsafe_view is returned from matmul instead.
"""
class CustomFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, inp1, inp2):
ctx.save_for_backward(inp2)
ctx.output_shape = inp1.size()
return torch.matmul(inp1, inp2)
@staticmethod
def backward(ctx, grad_output):
output_shape = ctx.output_shape
(inp2,) = ctx.saved_tensors
return (
torch.mm(grad_output.squeeze(), inp2.t()).view(output_shape),
None,
)
def outer_function(inp1, inp2):
res = CustomFunction.apply(inp1, inp2)
res.add_(1.0)
return res.sum()
def usual_function(inp1, inp2) -> torch.Tensor:
res = torch.matmul(inp1, inp2)
res.add_(1.0)
return res.sum()
inp1_custom = torch.randn(4, 1, 2, requires_grad=True)
inp1_usual = inp1_custom.detach().clone().requires_grad_(True)
inp2 = torch.randn(2, 4)
c_custom_func = torch.compile(outer_function)
c_usual_func = torch.compile(usual_function)
result_custom = c_custom_func(inp1_custom, inp2)
result_custom.backward()
result_usual = c_usual_func(inp1_usual, inp2)
result_usual.backward()
torch.allclose(inp1_custom.grad, inp1_usual.grad)
def test_retain_grad(self):
def fn(x, y):
y.retain_grad()
return torch.sin(y) + x
opt_fn = torch.compile(fn, backend="aot_eager")
x = torch.randn(4, requires_grad=True)
y = torch.cos(x)
opt_fn(x, y).sum().backward()
self.assertTrue(y.grad is not None)
| TestCustomFunction |
python | doocs__leetcode | solution/0000-0099/0047.Permutations II/Solution.py | {
"start": 0,
"end": 584
} | class ____:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
def dfs(i: int):
if i == n:
ans.append(t[:])
return
for j in range(n):
if vis[j] or (j and nums[j] == nums[j - 1] and not vis[j - 1]):
continue
t[i] = nums[j]
vis[j] = True
dfs(i + 1)
vis[j] = False
n = len(nums)
nums.sort()
ans = []
t = [0] * n
vis = [False] * n
dfs(0)
return ans
| Solution |
python | jina-ai__jina | jina/serve/runtimes/gateway/grpc/__init__.py | {
"start": 145,
"end": 306
} | class ____(GRPCServer, BaseGateway):
"""
:class:`GRPCGateway` is a GRPCServer that can be loaded from YAML as any other Gateway
"""
pass
| GRPCGateway |
python | kamyu104__LeetCode-Solutions | Python/maximum-repeating-substring.py | {
"start": 1238,
"end": 2275
} | class ____(object):
def maxRepeating(self, sequence, word):
"""
:type sequence: str
:type word: str
:rtype: int
"""
def getPrefix(pattern):
prefix = [-1] * len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j + 1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
if len(sequence) < len(word):
return 0
new_word = word*(len(sequence)//len(word))
prefix = getPrefix(new_word)
result, j = 0, -1
for i in xrange(len(sequence)):
while j > -1 and new_word[j+1] != sequence[i]:
j = prefix[j]
if new_word[j+1] == sequence[i]:
j += 1
result = max(result, j+1)
if j+1 == len(new_word):
break
return result//len(word)
| Solution2 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec19.py | {
"start": 491,
"end": 897
} | class ____:
def func1(self, handler: CommandHandler1[P]) -> Command1[P]:
return Command1(handler)
def func2(
self,
handler: CommandHandler1[P],
) -> Callable[[CommandHandler1[P]], Command1[P]]:
def decorator(handler: CommandHandler1[P]) -> Command1[P]:
return self.func1(handler)
return decorator
# Example 2: Callback Protocol
| Application1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.